diff --git a/.clang_format.hook b/.clang_format.hook
old mode 100644
new mode 100755
diff --git a/.gitignore b/.gitignore
old mode 100644
new mode 100755
diff --git a/.gitkeep b/.gitkeep
old mode 100644
new mode 100755
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
old mode 100644
new mode 100755
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/FAQ.md b/Dive-into-DL-paddlepaddle/FAQ.md
old mode 100644
new mode 100755
index 61fd244a7..f1b57eff9
--- a/Dive-into-DL-paddlepaddle/FAQ.md
+++ b/Dive-into-DL-paddlepaddle/FAQ.md
@@ -1,3 +1,3 @@
# FAQ
-大家好,本页面用来记录在使用飞桨核心框架开发本书籍时,开发者遇到的问题。
\ No newline at end of file
+大家好,本页面用来记录在使用飞桨核心框架开发本书籍时,开发者遇到的问题。
diff --git a/Dive-into-DL-paddlepaddle/README.md b/Dive-into-DL-paddlepaddle/README.md
old mode 100644
new mode 100755
index 2a6344e7b..680c42615
--- a/Dive-into-DL-paddlepaddle/README.md
+++ b/Dive-into-DL-paddlepaddle/README.md
@@ -29,7 +29,7 @@
## 原书地址
-中文版:[动手学深度学习](https://zh.d2l.ai/) | [Github仓库](https://github.com/d2l-ai/d2l-zh)
+中文版:[动手学深度学习](https://zh.d2l.ai/) | [Github仓库](https://github.com/d2l-ai/d2l-zh)
English Version: [Dive into Deep Learning](https://d2l.ai/) | [Github Repo](https://github.com/d2l-ai/d2l-en)
diff --git a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/attention-scoring-functions.md b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/attention-scoring-functions.md
old mode 100644
new mode 100755
index da3fe22e2..52700ce7b
--- a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/attention-scoring-functions.md
+++ b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/attention-scoring-functions.md
@@ -269,4 +269,3 @@ d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
[Discussions](https://discuss.d2l.ai/t/5752)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/bahdanau-attention.md b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/bahdanau-attention.md
old mode 100644
new mode 100755
index 1b7a2ba12..ac85afbe3
--- a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/bahdanau-attention.md
+++ b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/bahdanau-attention.md
@@ -224,4 +224,3 @@ d2l.show_heatmaps(
[Discussions](https://discuss.d2l.ai/t/5754)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/multihead-attention.md b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/multihead-attention.md
old mode 100644
new mode 100755
index c17c8b0b2..275427090
--- a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/multihead-attention.md
+++ b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/multihead-attention.md
@@ -179,4 +179,3 @@ attention(X, Y, Y, valid_lens).shape
[Discussions](https://discuss.d2l.ai/t/5758)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/nadaraya-waston.md b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/nadaraya-waston.md
old mode 100644
new mode 100755
index d09e767c6..8eb6b7a74
--- a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/nadaraya-waston.md
+++ b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/nadaraya-waston.md
@@ -321,4 +321,3 @@ d2l.show_heatmaps(net.attention_weight.unsqueeze(0).unsqueeze(0),
[Discussions](https://discuss.d2l.ai/t/5760)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/self-attention-and-positional-encoding.md b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/self-attention-and-positional-encoding.md
old mode 100644
new mode 100755
index 86f37d6f5..d27b7f4f6
--- a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/self-attention-and-positional-encoding.md
+++ b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/self-attention-and-positional-encoding.md
@@ -193,7 +193,7 @@ $$\begin{aligned}
\begin{bmatrix} p_{i, 2j} \\ p_{i, 2j+1} \\ \end{bmatrix}\\
=&\begin{bmatrix} \cos(\delta \omega_j) \sin(i \omega_j) + \sin(\delta \omega_j) \cos(i \omega_j) \\ -\sin(\delta \omega_j) \sin(i \omega_j) + \cos(\delta \omega_j) \cos(i \omega_j) \\ \end{bmatrix}\\
=&\begin{bmatrix} \sin\left((i+\delta) \omega_j\right) \\ \cos\left((i+\delta) \omega_j\right) \\ \end{bmatrix}\\
-=&
+=&
\begin{bmatrix} p_{i+\delta, 2j} \\ p_{i+\delta, 2j+1} \\ \end{bmatrix},
\end{aligned}$$
@@ -212,4 +212,3 @@ $2\times 2$投影矩阵不依赖于任何位置的索引$i$。
[Discussions](https://discuss.d2l.ai/t/5762)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/transformer.md b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/transformer.md
old mode 100644
new mode 100755
index 13e122a08..bd8665fde
--- a/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/transformer.md
+++ b/Dive-into-DL-paddlepaddle/docs/10_attention-mechanisms/transformer.md
@@ -422,4 +422,3 @@ d2l.show_heatmaps(
[Discussions](https://discuss.d2l.ai/t/5756)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/11_1optimization-intro.ipynb b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/11_1optimization-intro.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/11_2Convexity.ipynb b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/11_2Convexity.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/11_3gradient_descent.ipynb b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/11_3gradient_descent.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/convexity.md b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/convexity.md
old mode 100644
new mode 100755
index 9f06ab60c..ae22be903
--- a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/convexity.md
+++ b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/convexity.md
@@ -201,7 +201,7 @@ $$\lambda f(b) + (1-\lambda)f(a) \geq f((1-\lambda)a + \lambda b),$$
$f: \mathbb{R}^n \rightarrow \mathbb{R}$
是凸的当且仅当对于所有$\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$
-$$g(z) \stackrel{\mathrm{def}}{=} f(z \mathbf{x} + (1-z) \mathbf{y}) \text{ where } z \in [0,1]$$
+$$g(z) \stackrel{\mathrm{def}}{=} f(z \mathbf{x} + (1-z) \mathbf{y}) \text{ where } z \in [0,1]$$
是凸的。
@@ -317,7 +317,7 @@ $$\mathrm{Proj}_\mathcal{X}(\mathbf{x}) = \mathop{\mathrm{argmin}}_{\mathbf{x}'
* 凸约束可以通过拉格朗日函数来添加。在实践中,只需在目标函数中加上一个惩罚就可以了。
* 投影映射到凸集中最接近原始点的点。
-## 练习
+## 练习
1. 假设我们想要通过绘制集合内点之间的所有直线并检查这些直线是否包含来验证集合的凸性。
i.证明只检查边界上的点是充分的。
@@ -339,4 +339,3 @@ i.作为中间步骤,写出惩罚目标$|\mathbf{w} - \mathbf{w}'|_2^2 + \lamb
ii.你能无须反复试错就找到$\lambda$的“正确”值吗?
9. 给定一个凸集$\mathcal{X}$和两个向量$\mathbf{X}$和$\mathbf{y}$证明了投影不会增加距离,即$\|\mathbf{x} - \mathbf{y}\| \geq \|\mathrm{Proj}_\mathcal{X}(\mathbf{x}) - \mathrm{Proj}_\mathcal{X}(\mathbf{y})\|$。
-
diff --git a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/minibatch-sgd.md b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/minibatch-sgd.md
old mode 100644
new mode 100755
index d897aee31..ddb03e252
--- a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/minibatch-sgd.md
+++ b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/minibatch-sgd.md
@@ -352,4 +352,3 @@ train_concise_ch11(trainer, {'learning_rate': 0.01}, data_iter)
1. 修改批量大小和学习率,并观察目标函数值的下降率以及每个迭代轮数消耗的时间。
1. 将小批量随机梯度下降与实际从训练集中*取样替换*的变体进行比较。会看出什么?
1. 一个邪恶的精灵在没通知你的情况下复制了你的数据集(即每个观测发生两次,你的数据集增加到原始大小的两倍,但没有人告诉你)。随机梯度下降、小批量随机梯度下降和梯度下降的表现将如何变化?
-
diff --git a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/momentum.md b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/momentum.md
old mode 100644
new mode 100755
index f97e3fe5a..69cc85292
--- a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/momentum.md
+++ b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/momentum.md
@@ -332,4 +332,3 @@ $$
1. 试试梯度下降和动量法来解决一个二次问题,其中你有多个特征值,即$f(x) = \frac{1}{2} \sum_i \lambda_i x_i^2$,例如$\lambda_i = 2^{-i}$。绘制出$x$的值在初始化$x_i = 1$时如何下降。
1. 推导$h(\mathbf{x}) = \frac{1}{2} \mathbf{x}^\top \mathbf{Q} \mathbf{x} + \mathbf{x}^\top \mathbf{c} + b$的最小值和最小化器。
1. 当我们执行带动量法的随机梯度下降时会有什么变化?当我们使用带动量法的小批量随机梯度下降时会发生什么?试验参数如何?
-
diff --git a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/optimization-intro.md b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/optimization-intro.md
old mode 100644
new mode 100755
index 602111bc6..d1873bb93
--- a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/optimization-intro.md
+++ b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/optimization-intro.md
@@ -141,4 +141,3 @@ annotate('vanishing gradient', (4, 1), (2, 0.0))
1. 假设你想在(真实的)鞍上平衡一个(真实的)球。
1. 为什么这很难?
1. 你也能利用这种效应来优化算法吗?
-
diff --git a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/sgd.md b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/sgd.md
old mode 100644
new mode 100755
index 3326e0113..a6f1b6bff
--- a/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/sgd.md
+++ b/Dive-into-DL-paddlepaddle/docs/11_optimization_algorithm/sgd.md
@@ -211,4 +211,3 @@ $${n \choose 1} \frac{1}{n} \left(1-\frac{1}{n}\right)^{n-1} = \frac{n}{n-1} \le
1. 比较随机梯度下降的收敛性,当你从$\{(x_1, y_1), \ldots, (x_n, y_n)\}$使用替换方法进行采样时以及在不替换的情况下进行采样时
1. 如果某些梯度(或者更确切地说与之相关的某些坐标)始终比所有其他梯度都大,你将如何更改随机梯度下降求解器?
1. 假设是$f(x) = x^2 (1 + \sin x)$。$f$有多少局部最小值?你能改变$f$以尽量减少它需要评估所有局部最小值的方式吗?
-
diff --git a/Dive-into-DL-paddlepaddle/docs/1_introduction/qy.md b/Dive-into-DL-paddlepaddle/docs/1_introduction/qy.md
old mode 100644
new mode 100755
index 6bcd980d8..14c437e2e
--- a/Dive-into-DL-paddlepaddle/docs/1_introduction/qy.md
+++ b/Dive-into-DL-paddlepaddle/docs/1_introduction/qy.md
@@ -751,4 +751,3 @@ Canny边缘检测器 :cite:`Canny.1987` 和SIFT特征提取器 :cite:`Lowe.2004`
1. 你还可以在哪里应用端到端的训练方法,比如 :numref:`fig_ml_loop` 、物理、工程和计量经济学?
[Discussions](https://discuss.d2l.ai/t/2088)
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.1\346\225\260\346\215\256\346\223\215\344\275\234-checkpoint.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.1\346\225\260\346\215\256\346\223\215\344\275\234-checkpoint.ipynb"
old mode 100644
new mode 100755
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.2\346\225\260\346\215\256\351\242\204\345\244\204\347\220\206-checkpoint.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.2\346\225\260\346\215\256\351\242\204\345\244\204\347\220\206-checkpoint.ipynb"
old mode 100644
new mode 100755
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.3\347\272\277\346\200\247\344\273\243\346\225\260-checkpoint.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.3\347\272\277\346\200\247\344\273\243\346\225\260-checkpoint.ipynb"
old mode 100644
new mode 100755
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.4\345\276\256\345\210\206-checkpoint.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.4\345\276\256\345\210\206-checkpoint.ipynb"
old mode 100644
new mode 100755
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.5\350\207\252\345\212\250\346\261\202\345\257\274-checkpoint.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.5\350\207\252\345\212\250\346\261\202\345\257\274-checkpoint.ipynb"
old mode 100644
new mode 100755
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.7\346\237\245\351\230\205\346\226\207\346\241\243-checkpoint.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/.ipynb_checkpoints/2.7\346\237\245\351\230\205\346\226\207\346\241\243-checkpoint.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.1.md b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.1.md
old mode 100644
new mode 100755
index 9a129054e..870836751
--- a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.1.md
+++ b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.1.md
@@ -109,7 +109,7 @@ paddle.zeros((2, 3, 4))
[[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
-
+
[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]]])
@@ -131,7 +131,7 @@ paddle.ones((2, 3, 4))
[[[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.]],
-
+
[[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.]]])
@@ -487,7 +487,7 @@ a, a.item(), float(a), int(a)
in
1 a = paddle.to_tensor([3.5])
----> 2 a, a.item(), float(a), int(a)
-
+
AttributeError: 'Tensor' object has no attribute 'item'
@@ -503,4 +503,3 @@ a, a.item(), float(a), int(a)
[Discussions](https://discuss.d2l.ai/t/1747)
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.1\346\225\260\346\215\256\346\223\215\344\275\234.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.1\346\225\260\346\215\256\346\223\215\344\275\234.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.2.md b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.2.md
old mode 100644
new mode 100755
index e4217e91e..85474108c
--- a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.2.md
+++ b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.2.md
@@ -119,4 +119,3 @@ X, y
[Discussions](https://discuss.d2l.ai/t/1750)
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.2\346\225\260\346\215\256\351\242\204\345\244\204\347\220\206.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.2\346\225\260\346\215\256\351\242\204\345\244\204\347\220\206.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.3.md b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.3.md
old mode 100644
new mode 100755
index 01bf27736..00cf2fdd3
--- a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.3.md
+++ b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.3.md
@@ -239,7 +239,7 @@ X
[[[0 , 1 , 2 , 3 ],
[4 , 5 , 6 , 7 ],
[8 , 9 , 10, 11]],
-
+
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
@@ -323,7 +323,7 @@ a + X, (a * X).shape
[[[2 , 3 , 4 , 5 ],
[6 , 7 , 8 , 9 ],
[10, 11, 12, 13]],
-
+
[[14, 15, 16, 17],
[18, 19, 20, 21],
[22, 23, 24, 25]]]), [2, 3, 4])
@@ -820,4 +820,3 @@ paddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))
[Discussions](https://discuss.d2l.ai/t/1751)
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.3\347\272\277\346\200\247\344\273\243\346\225\260.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.3\347\272\277\346\200\247\344\273\243\346\225\260.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.4.md b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.4.md
old mode 100644
new mode 100755
index 39b0020c5..0dbd6ccfd
--- a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.4.md
+++ b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.4.md
@@ -249,4 +249,3 @@ $$\frac{dy}{dx_i} = \frac{dy}{du_1} \frac{du_1}{dx_i} + \frac{dy}{du_2} \frac{du
[Discussions](https://discuss.d2l.ai/t/1756)
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.4\345\276\256\345\210\206.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.4\345\276\256\345\210\206.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.5.md b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.5.md
old mode 100644
new mode 100755
index 6c03ad36e..10805b8d7
--- a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.5.md
+++ b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.5.md
@@ -119,7 +119,7 @@ x.grad
```python
x.clear_gradient()
y = x * x
-paddle.sum(y).backward()
+paddle.sum(y).backward()
x.grad
```
@@ -234,4 +234,3 @@ a.grad == d / a
[Discussions](https://discuss.d2l.ai/t/1759)
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.5\350\207\252\345\212\250\346\261\202\345\257\274.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.5\350\207\252\345\212\250\346\261\202\345\257\274.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.7.md b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.7.md
old mode 100644
new mode 100755
index faa86fd96..432e0266b
--- a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.7.md
+++ b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.7.md
@@ -32,40 +32,40 @@ help(paddle.ones)
```
Help on function ones in module paddle.tensor.creation:
-
+
ones(shape, dtype=None, name=None)
The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
-
+
Args:
shape(tuple|list|Tensor): Shape of the Tensor to be created, the data type of shape is int32 or int64.
dtype(np.dtype|str, optional): Data type of output Tensor, it supports
bool, float16, float32, float64, int32 and int64. Default: if None, the data type is 'float32'.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
-
+
Returns:
Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.
-
+
Examples:
.. code-block:: python
-
- import paddle
-
+
+ import paddle
+
# default dtype for ones OP
- data1 = paddle.ones(shape=[3, 2])
+ data1 = paddle.ones(shape=[3, 2])
# [[1. 1.]
# [1. 1.]
# [1. 1.]]
-
- data2 = paddle.ones(shape=[2, 2], dtype='int32')
+
+ data2 = paddle.ones(shape=[2, 2], dtype='int32')
# [[1 1]
# [1 1]]
-
+
# shape is a Tensor
shape = paddle.full(shape=[2], dtype='int32', fill_value=2)
- data3 = paddle.ones(shape=shape, dtype='int32')
+ data3 = paddle.ones(shape=shape, dtype='int32')
# [[1 1]
# [1 1]]
-
+
从文档中,我们可以看到 `ones` 函数创建一个具有指定形状的新张量,并将所有元素值设置为 1。让我们来[**运行一个快速测试**]来确认这一解释:
@@ -97,4 +97,3 @@ paddle.ones([4], dtype='float32')
[Discussions](https://discuss.d2l.ai/t/1765)
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.7\346\237\245\351\230\205\346\226\207\346\241\243.ipynb" "b/Dive-into-DL-paddlepaddle/docs/2_Preparatory-knowledge/2.7\346\237\245\351\230\205\346\226\207\346\241\243.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.1.md b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.1.md
old mode 100644
new mode 100755
index 3260a7927..44e344f1e
--- a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.1.md
+++ b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.1.md
@@ -314,4 +314,3 @@ $$-\log P(\mathbf y \mid \mathbf X) = \sum_{i=1}^n \frac{1}{2} \log(2 \pi \sigma
[Discussions](https://discuss.d2l.ai/t/1775)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.1linear-regression.ipynb b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.1linear-regression.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.2.md b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.2.md
old mode 100644
new mode 100755
index 14b64a82e..e52de4ac6
--- a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.2.md
+++ b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.2.md
@@ -246,7 +246,7 @@ def squared_loss(y_hat, y):
```python
-def sgd(params, lr, batch_size):
+def sgd(params, lr, batch_size):
"""小批量随机梯度下降。"""
with torch.no_grad():
for param in params:
@@ -275,7 +275,7 @@ def pddle_sgd(params, lr, batch_size):
- 初始化参数
- 重复,直到完成
- 计算梯度 $g \leftarrow \partial_{(w,b)}\frac{1}{\vert B\vert}\sum_{i \in B}l(x^{(i)},y^{(i)},w,b)$
- - 更新参数$(w,b) \leftarrow (w,b) \leftarrow \eta g$
+ - 更新参数$(w,b) \leftarrow (w,b) \leftarrow \eta g$
在每个迭代周期(epoch)中,我们使用`data_iter`函数遍历整个数据集,并将训练数据集中所有样本都使用一次(假设样本数能够被批量大小整除)。这里的迭代周期个数`num_epochs`和学习率`lr`都是超参数,分别设为3和0.03。设置超参数很棘手,需要通过反复试验进行调整。 我们现在忽略这些细节,以后会在 2节 中详细介绍。
diff --git a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.3.md b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.3.md
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.4.md b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.4.md
old mode 100644
new mode 100755
index c56923746..a67cd997d
--- a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.4.md
+++ b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.4.md
@@ -203,4 +203,3 @@ $$H[P] = \sum_j - P(j) \log P(j).$$
1. 将其扩展到两个以上的数字。
[Discussions](https://discuss.d2l.ai/t/1785)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.4softmax-regression.ipynb b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.4softmax-regression.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.5.md b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.5.md
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.6.md b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.6.md
old mode 100644
new mode 100755
index fe23c8265..1b50841d4
--- a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.6.md
+++ b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.6.md
@@ -102,7 +102,7 @@ paddle_X.sum(0, keepdim=True), paddle_X.sum(1, keepdim=True)
```
我们现在已经准备好实现softmax操作了。回想一下,softmax由三个步骤组成:
-
+
* 对每个项求幂(使用exp);
* 对每一行求和(小批量中每个样本是一行),得到每个样本的归一化常数;
@@ -111,7 +111,7 @@ paddle_X.sum(0, keepdim=True), paddle_X.sum(1, keepdim=True)
在查看代码之前,让我们回顾一下这个表达式
$$softmax(X)_{ij}=\frac{exp(X_{ij})}{\sum_k exp(X_{ik})}$$
-
+
分母或归一化常数,有时也称为配分函数(其对数称为对数-配分函数)。该名称的起源来自统计物理学中一个模拟粒子群分布的方程。
@@ -221,7 +221,7 @@ cross_entropy(paddle_y_hat, paddle_y)
```python
-def accuracy(y_hat, y):
+def accuracy(y_hat, y):
"""计算预测正确的数量。"""
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
y_hat = y_hat.argmax(axis=1)
@@ -255,7 +255,7 @@ def evaluate_accuracy(net, data_iter):
```python
-class Accumulator:
+class Accumulator:
"""在`n`个变量上累加。"""
def __init__(self, n):
self.data = [0.0] * n
@@ -396,7 +396,7 @@ class Animator:
```python
-def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
+def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
"""训练模型(定义见第3章)。"""
animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
legend=['train loss', 'train acc', 'test acc'])
diff --git a/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.7.md b/Dive-into-DL-paddlepaddle/docs/3_linear-networks/3.7.md
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.1.md b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.1.md
old mode 100644
new mode 100755
index af97e3b52..5142299d0
--- a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.1.md
+++ b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.1.md
@@ -131,9 +131,9 @@ d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.
C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\ipykernel\ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
- C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\paddle\fluid\dygraph\varbase_patch_methods.py:382: UserWarning:
+ C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\paddle\fluid\dygraph\varbase_patch_methods.py:382: UserWarning:
Warning:
- tensor.grad will return the tensor value of the gradient. This is an incompatible upgrade for tensor.grad API. It's return type changes from numpy.ndarray in version 2.0 to paddle.Tensor in version 2.1.0. If you want to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`
+ tensor.grad will return the tensor value of the gradient. This is an incompatible upgrade for tensor.grad API. It's return type changes from numpy.ndarray in version 2.0 to paddle.Tensor in version 2.1.0. If you want to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`
warnings.warn(warning_msg)
C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\d2l\torch.py:41: DeprecationWarning: `set_matplotlib_formats` is deprecated since IPython 7.23, directly use `matplotlib_inline.backend_inline.set_matplotlib_formats()`
display.set_matplotlib_formats('svg')
@@ -195,9 +195,9 @@ d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5,
C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\ipykernel\ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
- C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\paddle\fluid\dygraph\varbase_patch_methods.py:382: UserWarning:
+ C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\paddle\fluid\dygraph\varbase_patch_methods.py:382: UserWarning:
Warning:
- tensor.grad will return the tensor value of the gradient. This is an incompatible upgrade for tensor.grad API. It's return type changes from numpy.ndarray in version 2.0 to paddle.Tensor in version 2.1.0. If you want to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`
+ tensor.grad will return the tensor value of the gradient. This is an incompatible upgrade for tensor.grad API. It's return type changes from numpy.ndarray in version 2.0 to paddle.Tensor in version 2.1.0. If you want to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`
warnings.warn(warning_msg)
C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\d2l\torch.py:41: DeprecationWarning: `set_matplotlib_formats` is deprecated since IPython 7.23, directly use `matplotlib_inline.backend_inline.set_matplotlib_formats()`
display.set_matplotlib_formats('svg')
@@ -249,9 +249,9 @@ d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.
C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\ipykernel\ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
- C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\paddle\fluid\dygraph\varbase_patch_methods.py:382: UserWarning:
+ C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\paddle\fluid\dygraph\varbase_patch_methods.py:382: UserWarning:
Warning:
- tensor.grad will return the tensor value of the gradient. This is an incompatible upgrade for tensor.grad API. It's return type changes from numpy.ndarray in version 2.0 to paddle.Tensor in version 2.1.0. If you want to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`
+ tensor.grad will return the tensor value of the gradient. This is an incompatible upgrade for tensor.grad API. It's return type changes from numpy.ndarray in version 2.0 to paddle.Tensor in version 2.1.0. If you want to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`
warnings.warn(warning_msg)
C:\Users\WeiWu-GU\anaconda3\envs\pte\lib\site-packages\d2l\torch.py:41: DeprecationWarning: `set_matplotlib_formats` is deprecated since IPython 7.23, directly use `matplotlib_inline.backend_inline.set_matplotlib_formats()`
display.set_matplotlib_formats('svg')
@@ -277,4 +277,3 @@ d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.
[Discussions](https://discuss.d2l.ai/t/1796)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.10.md b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.10.md
old mode 100644
new mode 100755
index dd0400cc8..68918772f
--- a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.10.md
+++ b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.10.md
@@ -383,4 +383,3 @@ train_and_pred(train_features, test_features, train_labels, test_data,
[Discussions](https://discuss.d2l.ai/t/1824)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.10kaggle-house-price.ipynb b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.10kaggle-house-price.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.1mlp.ipynb b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.1mlp.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.2.md b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.2.md
old mode 100644
new mode 100755
index eac9e2d2e..a9cab23f0
--- a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.2.md
+++ b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.2.md
@@ -105,7 +105,7 @@ pd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
1 num_epochs, lr = 10, 0.1
2 updater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)
----> 3 pd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
-
+
D:\workspace\d2ltopaddle\chapter_multilayer-perceptrons\pd2l.py in train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
324 legend=['train loss', 'train acc', 'test acc'])
@@ -127,13 +127,13 @@ pd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
2 X = X.reshape((-1, num_inputs))
3 H = relu(X@W1 + b1) # 这里“@”代表矩阵乘法
----> 4 return (H@W2 + b2)
-
+
~\anaconda3\envs\pte\lib\site-packages\paddle\fluid\dygraph\math_op_patch.py in __impl__(self, other_var)
248 axis = -1
249 math_op = getattr(core.ops, op_type)
--> 250 return math_op(self, other_var, 'axis', axis)
- 251
+ 251
252 comment = OpProtoHolder.instance().get_op_proto(op_type).comment
@@ -170,4 +170,3 @@ d2l.predict_ch3(net, test_iter)
[Discussions](https://discuss.d2l.ai/t/1804)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.2mlp-scratch.ipynb b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.2mlp-scratch.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.3.md b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.3.md
old mode 100644
new mode 100755
index 1d3e2f589..301466fa3
--- a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.3.md
+++ b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.3.md
@@ -64,4 +64,3 @@ train_iter, test_iter = pd2l.load_data_fashion_mnist(batch_size)
[Discussions](https://discuss.d2l.ai/t/1802)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.3mlp-concise.ipynb b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.3mlp-concise.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.4.md b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.4.md
old mode 100644
new mode 100755
index 3490bd3f6..1b4bc7557
--- a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.4.md
+++ b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.4.md
@@ -266,8 +266,8 @@ train(poly_features[:n_train, :4], poly_features[n_train:, :4],
273 metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
274 # Return training loss and training accuracy
--> 275 return metric[0] / metric[2], metric[1] / metric[2]
- 276
- 277
+ 276
+ 277
ZeroDivisionError: float division by zero
@@ -343,4 +343,3 @@ train(poly_features[:n_train, :], poly_features[n_train:, :],
[Discussions](https://discuss.d2l.ai/t/1806)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.4underfit-overfit.ipynb b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.4underfit-overfit.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.5.md b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.5.md
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.5weight-decay.ipynb b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.5weight-decay.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.6.md b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.6.md
old mode 100644
new mode 100755
index e0d2b9c62..fc0316537
--- a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.6.md
+++ b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.6.md
@@ -78,7 +78,7 @@ def dropout_layer(X, dropout):
# 在本情况中,所有元素都被保留。
if dropout == 0:
return X
-
+
mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')
return mask * X / (1.0 - dropout)
```
@@ -211,4 +211,3 @@ d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
[Discussions](https://discuss.d2l.ai/t/1813)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.6dropout.ipynb b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.6dropout.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.7.md b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.7.md
old mode 100644
new mode 100755
index 54dd5b5fa..c8b5f46e1
--- a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.7.md
+++ b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.7.md
@@ -130,4 +130,3 @@ $$
1. 与小批量训练相比,有哪些优点和缺点?
[Discussions](https://discuss.d2l.ai/t/1816)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.7backprop.ipynb b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.7backprop.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.8.md b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.8.md
old mode 100644
new mode 100755
index ef10c3810..322a60c7f
--- a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.8.md
+++ b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.8.md
@@ -127,4 +127,3 @@ $$U\left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mat
[Discussions](https://discuss.d2l.ai/t/1818)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.8numerical-stability-and-init.ipynb b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.8numerical-stability-and-init.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.9.md b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.9.md
old mode 100644
new mode 100755
index eb10e44e9..2ba6e215a
--- a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.9.md
+++ b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.9.md
@@ -249,4 +249,3 @@ $$
4. 除了分布偏移,还有什么会影响经验风险接近真实风险的程度?
[Discussions](https://discuss.d2l.ai/t/1822)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.9environment.ipynb b/Dive-into-DL-paddlepaddle/docs/4_multilayer-perceptrons/4.9environment.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.1.ipynb b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.1.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.1.md b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.1.md
old mode 100644
new mode 100755
index fde7fc27c..6bc9273cf
--- a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.1.md
+++ b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.1.md
@@ -17,7 +17,7 @@
```python
-import paddle
+import paddle
from paddle import nn
from paddle.nn import functional as F
diff --git a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.2.ipynb b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.2.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.2.md b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.2.md
old mode 100644
new mode 100755
index 65407885b..a12547e04
--- a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.2.md
+++ b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.2.md
@@ -209,8 +209,8 @@ print(rgnet[0].state_dict()['block 0.0.bias'])
def init_normal(m):
if type(m) == nn.Linear:
paddle.nn.initializer.Normal(mean=0.0, std=0.01)
- paddle.zeros(m.bias)
-
+ paddle.zeros(m.bias)
+
net.apply(init_normal)
net[0].weight[0],net[0].state_dict()['bias']
@@ -235,7 +235,7 @@ def init_constant(m):
if type(m) == nn.Linear:
paddle.nn.initializer.Constant(value=1)
paddle.zeros(m.bias)
-
+
#nn.init.normal_(m.weight, mean=0, std=0.01)
#nn.init.zeros_(m.bias)
@@ -261,12 +261,12 @@ net[0].weight[0],net[0].state_dict()['bias']
def xavier(m):
if type(m) == nn.Linear:
paddle.nn.initializer.XavierUniform(m.weight)
-
+
def init_42(m):
if type(m) == nn.Linear:
paddle.nn.initializer.Constant(42)
-
+
net[0].apply(xavier)
net[2].apply(init_42)
@@ -309,7 +309,7 @@ def my_init(m):
h=paddle.to_tensor(h)
m=paddle.to_tensor(m.weight)
m*=h
-
+
net.apply(my_init)
net[0].weight[:2]
diff --git a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.4.ipynb b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.4.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.4.md b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.4.md
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.5.ipynb b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.5.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.5.md b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.5.md
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.6.ipynb b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.6.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.6.md b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.6.md
old mode 100644
new mode 100755
index a11d58037..f2790cd48
--- a/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.6.md
+++ b/Dive-into-DL-paddlepaddle/docs/5_deep-learning-computation/5.6.md
@@ -10,7 +10,7 @@
!nvidia-smi
```
- Fri Aug 6 23:48:38 2021
+ Fri Aug 6 23:48:38 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |
|-------------------------------+----------------------+----------------------+
@@ -26,7 +26,7 @@
| 22% 35C P8 22W / 250W | 1MiB / 11019MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
-
+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
@@ -227,7 +227,7 @@ Z is Z
```python
-import paddle
+import paddle
import paddle.nn as nn
paddle.device.set_device("gpu:0")
net = nn.Sequential(nn.Linear(3, 1))
diff --git a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.1.md b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.1.md
old mode 100644
new mode 100755
index 810f6c426..05f8b1ab2
--- a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.1.md
+++ b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.1.md
@@ -163,4 +163,3 @@ $$[\mathsf{H}]_{i,j,d} = \sum_{a = -\Delta}^{\Delta} \sum_{b = -\Delta}^{\Delta}
1. 证明在 :eqref:`eq_2d-conv-discrete` 中, $f * g = g * f$ 。
[Discussions](https://discuss.d2l.ai/t/1846)
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.1\344\273\216\345\205\250\350\277\236\346\216\245\345\261\202\345\210\260\345\215\267\347\247\257.ipynb" "b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.1\344\273\216\345\205\250\350\277\236\346\216\245\345\261\202\345\210\260\345\215\267\347\247\257.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.2.md b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.2.md
old mode 100644
new mode 100755
index 205ddcd8a..868785000
--- a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.2.md
+++ b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.2.md
@@ -233,4 +233,3 @@ corr2d(X.t(), K)
1. 二阶导数的核形式是什么?
1. 积分的核形式是什么?
1. 得到 $d$ 次导数的最小核大小是多少?
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.2\345\233\276\345\203\217\345\215\267\347\247\257.ipynb" "b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.2\345\233\276\345\203\217\345\215\267\347\247\257.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.3.md b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.3.md
old mode 100644
new mode 100755
index 23cd2478a..8a3817137
--- a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.3.md
+++ b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.3.md
@@ -157,4 +157,3 @@ comp_conv2d(conv2d, X).shape
1. 在本节中的实验中,试一试其他填充和步幅组合。
1. 对于音频信号,步幅 $2$ 说明什么?
1. 步幅大于 $1$ 的计算优势是什么?
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.3\345\241\253\345\205\205\345\222\214\346\255\245\345\271\205.ipynb" "b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.3\345\241\253\345\205\205\345\222\214\346\255\245\345\271\205.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.4.md b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.4.md
old mode 100644
new mode 100755
index 9334d0018..164955682
--- a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.4.md
+++ b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.4.md
@@ -29,7 +29,7 @@ import paddle
def corr2d(X, K):
"""2D相关计算"""
- h, w = K.shape
+ h, w = K.shape
Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
@@ -47,15 +47,15 @@ def corr2d_multi_in(X, K):
```python
-X = paddle.to_tensor([[[0, 1, 2],
- [3, 4, 5],
+X = paddle.to_tensor([[[0, 1, 2],
+ [3, 4, 5],
[6, 7, 8]],
- [[1, 2, 3],
- [4, 5, 6],
+ [[1, 2, 3],
+ [4, 5, 6],
[7, 8, 9]]], dtype='float32')
-K = paddle.to_tensor([[[0, 1],
- [2, 3]],
- [[1, 2],
+K = paddle.to_tensor([[[0, 1],
+ [2, 3]],
+ [[1, 2],
[3, 4]]], dtype='float32')
# print(X.shape, K.shape)
@@ -115,10 +115,10 @@ corr2d_multi_in_out(X, K)
Tensor(shape=[3, 2, 2], dtype=float32, place=CPUPlace, stop_gradient=True,
[[[56. , 72. ],
[104., 120.]],
-
+
[[76. , 100.],
[148., 172.]],
-
+
[[96. , 128.],
[192., 224.]]])
@@ -199,4 +199,3 @@ Y2 = corr2d_multi_in_out(X, K)
1. 如果卷积核的高度和宽度是 $k_h=k_w=1$,前向传播的计算复杂度是多少?
1. 本节最后一个示例中的变量 `Y1` 和 `Y2` 是否完全相同?为什么?
1. 当卷积窗口不是 $1\times 1$ 时,如何使用矩阵乘法实现卷积?
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.4\345\244\232\350\276\223\345\205\245\345\244\232\350\276\223\345\207\272\351\200\232\351\201\223.ipynb" "b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.4\345\244\232\350\276\223\345\205\245\345\244\232\350\276\223\345\207\272\351\200\232\351\201\223.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.5.md b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.5.md
old mode 100644
new mode 100755
index 7ced0b658..77d03998b
--- a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.5.md
+++ b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.5.md
@@ -56,7 +56,7 @@ import paddle
in
----> 1 import paddle
-
+
ModuleNotFoundError: No module named 'paddle'
@@ -71,7 +71,7 @@ def pool2d(X, pool_size, mode='max'):
if mode == 'max':
Y[i, j] = X[i: i + p_h, j: j + p_w].max()
elif mode == 'avg':
- Y[i, j] = X[i: i + p_h, j: j + p_w].mean()
+ Y[i, j] = X[i: i + p_h, j: j + p_w].mean()
return Y
```
@@ -207,7 +207,7 @@ X
[4. , 5. , 6. , 7. ],
[8. , 9. , 10., 11.],
[12., 13., 14., 15.]],
-
+
[[1. , 2. , 3. , 4. ],
[5. , 6. , 7. , 8. ],
[9. , 10., 11., 12.],
@@ -230,7 +230,7 @@ pool2d(X)
Tensor(shape=[1, 2, 2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
[[[[5. , 7. ],
[13., 15.]],
-
+
[[6. , 8. ],
[14., 16.]]]])
@@ -253,4 +253,3 @@ pool2d(X)
1. 为什么最大池化层和平均池化层的工作方式不同?
1. 我们是否需要最小池化层?可以用已知函数替换它吗?
1. 除了平均池化层和最大池化层,是否有其它函数可以考虑(提示:回忆 `softmax` )?为什么它可能不受欢迎?
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.5\346\261\240\345\214\226\345\261\202.ipynb" "b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.5\346\261\240\345\214\226\345\261\202.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.6.md b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.6.md
old mode 100644
new mode 100755
index 7615a104f..141143085
--- a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.6.md
+++ b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.6.md
@@ -124,7 +124,7 @@ def load_data_fashion_mnist(batch_size, resize=None):
if resize:
trans.append(transforms.Resize(size=resize))
trans.append(transforms.ToTensor())
-
+
transform = transforms.Compose(trans)
mnist_train = datasets.FashionMNIST(mode='train', download=True, transform=transform)
mnist_test = datasets.FashionMNIST(mode='test', download=True, transform=transform)
@@ -163,9 +163,9 @@ def evaluate_accuracy(data_iter, net):
else: # 自定义的模型, 3.13节之后不会用到, 不考虑GPU
if('is_training' in net.__code__.co_varnames): # 如果有is_training这个参数
# 将is_training设置成False
- acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
+ acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
else:
- acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
+ acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
n += y.shape[0]
return acc_sum / n
```
@@ -200,7 +200,7 @@ def train_ch6(net, train_iter, test_iter, batch_size, optimi, num_epochs):
batch_count += 1
test_acc = evaluate_accuracy(test_iter, net)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
- % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))
+ % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))
```
现在,我们[**训练和评估LeNet-5模型**]。
@@ -233,4 +233,3 @@ train_ch6(net, train_iter, test_iter, batch_size, optimi, num_epochs)
1. 调整学习率和其他训练细节(例如,初始化和周期数)。
1. 在 MNIST 数据集上尝试以上改进的网络。
1. 显示不同输入(例如毛衣和外套)时,LeNet 第一层和第二层的激活值。
-
diff --git "a/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.6\345\215\267\347\247\257\347\245\236\347\273\217\347\275\221\347\273\234LeNet.ipynb" "b/Dive-into-DL-paddlepaddle/docs/6_Convolutional-Neural-Network/6.6\345\215\267\347\247\257\347\245\236\347\273\217\347\275\221\347\273\234LeNet.ipynb"
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.1.md b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.1.md
old mode 100644
new mode 100755
index a2561dfad..5d5900f68
--- a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.1.md
+++ b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.1.md
@@ -147,29 +147,29 @@ print(paddle.summary(AlexNet, (1, 1, 224, 224)))
```
---------------------------------------------------------------------------
- Layer (type) Input Shape Output Shape Param #
+ Layer (type) Input Shape Output Shape Param #
===========================================================================
- Conv2D-1 [[1, 1, 224, 224]] [1, 96, 54, 54] 11,712
- ReLU-1 [[1, 96, 54, 54]] [1, 96, 54, 54] 0
- MaxPool2D-1 [[1, 96, 54, 54]] [1, 96, 26, 26] 0
- Conv2D-2 [[1, 96, 26, 26]] [1, 256, 26, 26] 614,656
- ReLU-2 [[1, 256, 26, 26]] [1, 256, 26, 26] 0
- MaxPool2D-2 [[1, 256, 26, 26]] [1, 256, 12, 12] 0
- Conv2D-3 [[1, 256, 12, 12]] [1, 384, 12, 12] 885,120
- ReLU-3 [[1, 384, 12, 12]] [1, 384, 12, 12] 0
- Conv2D-4 [[1, 384, 12, 12]] [1, 384, 12, 12] 1,327,488
- ReLU-4 [[1, 384, 12, 12]] [1, 384, 12, 12] 0
- Conv2D-5 [[1, 384, 12, 12]] [1, 256, 12, 12] 884,992
- ReLU-5 [[1, 256, 12, 12]] [1, 256, 12, 12] 0
- MaxPool2D-3 [[1, 256, 12, 12]] [1, 256, 5, 5] 0
- Flatten-1 [[1, 256, 5, 5]] [1, 6400] 0
- Linear-1 [[1, 6400]] [1, 4096] 26,218,496
- ReLU-6 [[1, 4096]] [1, 4096] 0
- Dropout-1 [[1, 4096]] [1, 4096] 0
- Linear-2 [[1, 4096]] [1, 4096] 16,781,312
- ReLU-7 [[1, 4096]] [1, 4096] 0
- Dropout-2 [[1, 4096]] [1, 4096] 0
- Linear-3 [[1, 4096]] [1, 10] 40,970
+ Conv2D-1 [[1, 1, 224, 224]] [1, 96, 54, 54] 11,712
+ ReLU-1 [[1, 96, 54, 54]] [1, 96, 54, 54] 0
+ MaxPool2D-1 [[1, 96, 54, 54]] [1, 96, 26, 26] 0
+ Conv2D-2 [[1, 96, 26, 26]] [1, 256, 26, 26] 614,656
+ ReLU-2 [[1, 256, 26, 26]] [1, 256, 26, 26] 0
+ MaxPool2D-2 [[1, 256, 26, 26]] [1, 256, 12, 12] 0
+ Conv2D-3 [[1, 256, 12, 12]] [1, 384, 12, 12] 885,120
+ ReLU-3 [[1, 384, 12, 12]] [1, 384, 12, 12] 0
+ Conv2D-4 [[1, 384, 12, 12]] [1, 384, 12, 12] 1,327,488
+ ReLU-4 [[1, 384, 12, 12]] [1, 384, 12, 12] 0
+ Conv2D-5 [[1, 384, 12, 12]] [1, 256, 12, 12] 884,992
+ ReLU-5 [[1, 256, 12, 12]] [1, 256, 12, 12] 0
+ MaxPool2D-3 [[1, 256, 12, 12]] [1, 256, 5, 5] 0
+ Flatten-1 [[1, 256, 5, 5]] [1, 6400] 0
+ Linear-1 [[1, 6400]] [1, 4096] 26,218,496
+ ReLU-6 [[1, 4096]] [1, 4096] 0
+ Dropout-1 [[1, 4096]] [1, 4096] 0
+ Linear-2 [[1, 4096]] [1, 4096] 16,781,312
+ ReLU-7 [[1, 4096]] [1, 4096] 0
+ Dropout-2 [[1, 4096]] [1, 4096] 0
+ Linear-3 [[1, 4096]] [1, 10] 40,970
===========================================================================
Total params: 46,764,746
Trainable params: 46,764,746
@@ -180,7 +180,7 @@ print(paddle.summary(AlexNet, (1, 1, 224, 224)))
Params size (MB): 178.39
Estimated Total Size (MB): 188.81
---------------------------------------------------------------------------
-
+
{'total_params': 46764746, 'trainable_params': 46764746}
@@ -323,4 +323,3 @@ model.fit(train_dataset, val_dataset, epochs=num_epochs, batch_size=batch_size,
[Discussions](https://discuss.d2l.ai/t/1863)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.1_AlexNet.ipynb b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.1_AlexNet.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.2.md b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.2.md
old mode 100644
new mode 100755
index e313d9960..668eabc2b
--- a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.2.md
+++ b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.2.md
@@ -89,37 +89,37 @@ print(paddle.summary(VGG, (1, 1, 224, 224)))
```
---------------------------------------------------------------------------
- Layer (type) Input Shape Output Shape Param #
+ Layer (type) Input Shape Output Shape Param #
===========================================================================
- Conv2D-1 [[1, 1, 224, 224]] [1, 64, 224, 224] 640
- ReLU-1 [[1, 64, 224, 224]] [1, 64, 224, 224] 0
- MaxPool2D-1 [[1, 64, 224, 224]] [1, 64, 112, 112] 0
- Conv2D-2 [[1, 64, 112, 112]] [1, 128, 112, 112] 73,856
- ReLU-2 [[1, 128, 112, 112]] [1, 128, 112, 112] 0
- MaxPool2D-2 [[1, 128, 112, 112]] [1, 128, 56, 56] 0
- Conv2D-3 [[1, 128, 56, 56]] [1, 256, 56, 56] 295,168
- ReLU-3 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
- Conv2D-4 [[1, 256, 56, 56]] [1, 256, 56, 56] 590,080
- ReLU-4 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
- MaxPool2D-3 [[1, 256, 56, 56]] [1, 256, 28, 28] 0
- Conv2D-5 [[1, 256, 28, 28]] [1, 512, 28, 28] 1,180,160
- ReLU-5 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
- Conv2D-6 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,359,808
- ReLU-6 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
- MaxPool2D-4 [[1, 512, 28, 28]] [1, 512, 14, 14] 0
- Conv2D-7 [[1, 512, 14, 14]] [1, 512, 14, 14] 2,359,808
- ReLU-7 [[1, 512, 14, 14]] [1, 512, 14, 14] 0
- Conv2D-8 [[1, 512, 14, 14]] [1, 512, 14, 14] 2,359,808
- ReLU-8 [[1, 512, 14, 14]] [1, 512, 14, 14] 0
- MaxPool2D-5 [[1, 512, 14, 14]] [1, 512, 7, 7] 0
- Flatten-1 [[1, 512, 7, 7]] [1, 25088] 0
+ Conv2D-1 [[1, 1, 224, 224]] [1, 64, 224, 224] 640
+ ReLU-1 [[1, 64, 224, 224]] [1, 64, 224, 224] 0
+ MaxPool2D-1 [[1, 64, 224, 224]] [1, 64, 112, 112] 0
+ Conv2D-2 [[1, 64, 112, 112]] [1, 128, 112, 112] 73,856
+ ReLU-2 [[1, 128, 112, 112]] [1, 128, 112, 112] 0
+ MaxPool2D-2 [[1, 128, 112, 112]] [1, 128, 56, 56] 0
+ Conv2D-3 [[1, 128, 56, 56]] [1, 256, 56, 56] 295,168
+ ReLU-3 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
+ Conv2D-4 [[1, 256, 56, 56]] [1, 256, 56, 56] 590,080
+ ReLU-4 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
+ MaxPool2D-3 [[1, 256, 56, 56]] [1, 256, 28, 28] 0
+ Conv2D-5 [[1, 256, 28, 28]] [1, 512, 28, 28] 1,180,160
+ ReLU-5 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
+ Conv2D-6 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,359,808
+ ReLU-6 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
+ MaxPool2D-4 [[1, 512, 28, 28]] [1, 512, 14, 14] 0
+ Conv2D-7 [[1, 512, 14, 14]] [1, 512, 14, 14] 2,359,808
+ ReLU-7 [[1, 512, 14, 14]] [1, 512, 14, 14] 0
+ Conv2D-8 [[1, 512, 14, 14]] [1, 512, 14, 14] 2,359,808
+ ReLU-8 [[1, 512, 14, 14]] [1, 512, 14, 14] 0
+ MaxPool2D-5 [[1, 512, 14, 14]] [1, 512, 7, 7] 0
+ Flatten-1 [[1, 512, 7, 7]] [1, 25088] 0
Linear-1 [[1, 25088]] [1, 4096] 102,764,544
- ReLU-9 [[1, 4096]] [1, 4096] 0
- Dropout-1 [[1, 4096]] [1, 4096] 0
- Linear-2 [[1, 4096]] [1, 4096] 16,781,312
- ReLU-10 [[1, 4096]] [1, 4096] 0
- Dropout-2 [[1, 4096]] [1, 4096] 0
- Linear-3 [[1, 4096]] [1, 10] 40,970
+ ReLU-9 [[1, 4096]] [1, 4096] 0
+ Dropout-1 [[1, 4096]] [1, 4096] 0
+ Linear-2 [[1, 4096]] [1, 4096] 16,781,312
+ ReLU-10 [[1, 4096]] [1, 4096] 0
+ Dropout-2 [[1, 4096]] [1, 4096] 0
+ Linear-3 [[1, 4096]] [1, 10] 40,970
===========================================================================
Total params: 128,806,154
Trainable params: 128,806,154
@@ -130,7 +130,7 @@ print(paddle.summary(VGG, (1, 1, 224, 224)))
Params size (MB): 491.36
Estimated Total Size (MB): 616.92
---------------------------------------------------------------------------
-
+
{'total_params': 128806154, 'trainable_params': 128806154}
@@ -198,4 +198,3 @@ model.fit(train_dataset, val_dataset, epochs=num_epochs, batch_size=batch_size,
[Discussions](https://discuss.d2l.ai/t/1866)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.2_VGG.ipynb b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.2_VGG.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.3.md b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.3.md
old mode 100644
new mode 100755
index 49775cc70..2e10439a5
--- a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.3.md
+++ b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.3.md
@@ -73,38 +73,38 @@ print(paddle.summary(NiN, (1, 1, 224, 224)))
```
-------------------------------------------------------------------------------
- Layer (type) Input Shape Output Shape Param #
+ Layer (type) Input Shape Output Shape Param #
===============================================================================
- Conv2D-1 [[1, 1, 224, 224]] [1, 96, 54, 54] 11,712
- ReLU-1 [[1, 96, 54, 54]] [1, 96, 54, 54] 0
- Conv2D-2 [[1, 96, 54, 54]] [1, 96, 54, 54] 9,312
- ReLU-2 [[1, 96, 54, 54]] [1, 96, 54, 54] 0
- Conv2D-3 [[1, 96, 54, 54]] [1, 96, 54, 54] 9,312
- ReLU-3 [[1, 96, 54, 54]] [1, 96, 54, 54] 0
- MaxPool2D-1 [[1, 96, 54, 54]] [1, 96, 26, 26] 0
- Conv2D-4 [[1, 96, 26, 26]] [1, 256, 26, 26] 614,656
- ReLU-4 [[1, 256, 26, 26]] [1, 256, 26, 26] 0
- Conv2D-5 [[1, 256, 26, 26]] [1, 256, 26, 26] 65,792
- ReLU-5 [[1, 256, 26, 26]] [1, 256, 26, 26] 0
- Conv2D-6 [[1, 256, 26, 26]] [1, 256, 26, 26] 65,792
- ReLU-6 [[1, 256, 26, 26]] [1, 256, 26, 26] 0
- MaxPool2D-2 [[1, 256, 26, 26]] [1, 256, 12, 12] 0
- Conv2D-7 [[1, 256, 12, 12]] [1, 384, 12, 12] 885,120
- ReLU-7 [[1, 384, 12, 12]] [1, 384, 12, 12] 0
- Conv2D-8 [[1, 384, 12, 12]] [1, 384, 12, 12] 147,840
- ReLU-8 [[1, 384, 12, 12]] [1, 384, 12, 12] 0
- Conv2D-9 [[1, 384, 12, 12]] [1, 384, 12, 12] 147,840
- ReLU-9 [[1, 384, 12, 12]] [1, 384, 12, 12] 0
- MaxPool2D-3 [[1, 384, 12, 12]] [1, 384, 5, 5] 0
- Dropout-1 [[1, 384, 5, 5]] [1, 384, 5, 5] 0
- Conv2D-10 [[1, 384, 5, 5]] [1, 10, 5, 5] 34,570
- ReLU-10 [[1, 10, 5, 5]] [1, 10, 5, 5] 0
- Conv2D-11 [[1, 10, 5, 5]] [1, 10, 5, 5] 110
- ReLU-11 [[1, 10, 5, 5]] [1, 10, 5, 5] 0
- Conv2D-12 [[1, 10, 5, 5]] [1, 10, 5, 5] 110
- ReLU-12 [[1, 10, 5, 5]] [1, 10, 5, 5] 0
- AdaptiveAvgPool2D-1 [[1, 10, 5, 5]] [1, 10, 1, 1] 0
- Flatten-1 [[1, 10, 1, 1]] [1, 10] 0
+ Conv2D-1 [[1, 1, 224, 224]] [1, 96, 54, 54] 11,712
+ ReLU-1 [[1, 96, 54, 54]] [1, 96, 54, 54] 0
+ Conv2D-2 [[1, 96, 54, 54]] [1, 96, 54, 54] 9,312
+ ReLU-2 [[1, 96, 54, 54]] [1, 96, 54, 54] 0
+ Conv2D-3 [[1, 96, 54, 54]] [1, 96, 54, 54] 9,312
+ ReLU-3 [[1, 96, 54, 54]] [1, 96, 54, 54] 0
+ MaxPool2D-1 [[1, 96, 54, 54]] [1, 96, 26, 26] 0
+ Conv2D-4 [[1, 96, 26, 26]] [1, 256, 26, 26] 614,656
+ ReLU-4 [[1, 256, 26, 26]] [1, 256, 26, 26] 0
+ Conv2D-5 [[1, 256, 26, 26]] [1, 256, 26, 26] 65,792
+ ReLU-5 [[1, 256, 26, 26]] [1, 256, 26, 26] 0
+ Conv2D-6 [[1, 256, 26, 26]] [1, 256, 26, 26] 65,792
+ ReLU-6 [[1, 256, 26, 26]] [1, 256, 26, 26] 0
+ MaxPool2D-2 [[1, 256, 26, 26]] [1, 256, 12, 12] 0
+ Conv2D-7 [[1, 256, 12, 12]] [1, 384, 12, 12] 885,120
+ ReLU-7 [[1, 384, 12, 12]] [1, 384, 12, 12] 0
+ Conv2D-8 [[1, 384, 12, 12]] [1, 384, 12, 12] 147,840
+ ReLU-8 [[1, 384, 12, 12]] [1, 384, 12, 12] 0
+ Conv2D-9 [[1, 384, 12, 12]] [1, 384, 12, 12] 147,840
+ ReLU-9 [[1, 384, 12, 12]] [1, 384, 12, 12] 0
+ MaxPool2D-3 [[1, 384, 12, 12]] [1, 384, 5, 5] 0
+ Dropout-1 [[1, 384, 5, 5]] [1, 384, 5, 5] 0
+ Conv2D-10 [[1, 384, 5, 5]] [1, 10, 5, 5] 34,570
+ ReLU-10 [[1, 10, 5, 5]] [1, 10, 5, 5] 0
+ Conv2D-11 [[1, 10, 5, 5]] [1, 10, 5, 5] 110
+ ReLU-11 [[1, 10, 5, 5]] [1, 10, 5, 5] 0
+ Conv2D-12 [[1, 10, 5, 5]] [1, 10, 5, 5] 110
+ ReLU-12 [[1, 10, 5, 5]] [1, 10, 5, 5] 0
+ AdaptiveAvgPool2D-1 [[1, 10, 5, 5]] [1, 10, 1, 1] 0
+ Flatten-1 [[1, 10, 1, 1]] [1, 10] 0
===============================================================================
Total params: 1,992,166
Trainable params: 1,992,166
@@ -115,7 +115,7 @@ print(paddle.summary(NiN, (1, 1, 224, 224)))
Params size (MB): 7.60
Estimated Total Size (MB): 31.99
-------------------------------------------------------------------------------
-
+
{'total_params': 1992166, 'trainable_params': 1992166}
@@ -182,4 +182,3 @@ model.fit(train_dataset, val_dataset, epochs=num_epochs, batch_size=batch_size,
[Discussions](https://discuss.d2l.ai/t/1869)
-
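The NiN rows can be cross-checked the same way (a minimal sketch, not part of the diff): convolution parameter counts are `in_channels * out_channels * kH * kW + out_channels`, which reproduces both the 3x3 and the 1x1 "NiN" convolutions in the summary.

```python
# Sanity check for the NiN summary above (illustrative only, not in the repo):
# Conv2D params = in_channels * out_channels * kH * kW + out_channels.
conv10 = 384 * 10 * 3 * 3 + 10  # 34,570 -> Conv2D-10 (3x3)
conv11 = 10 * 10 * 1 * 1 + 10   # 110    -> Conv2D-11 / Conv2D-12 (1x1)
print(conv10, conv11)
```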
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.3_NiN.ipynb b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.3_NiN.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.4.md b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.4.md
old mode 100644
new mode 100755
index b5e0c9a58..7f0f119ff
--- a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.4.md
+++ b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.4.md
@@ -143,92 +143,92 @@ print(paddle.summary(GoogLeNet, (1, 1, 96, 96)))
```
-------------------------------------------------------------------------------
- Layer (type) Input Shape Output Shape Param #
+ Layer (type) Input Shape Output Shape Param #
===============================================================================
- Conv2D-1 [[1, 1, 96, 96]] [1, 64, 48, 48] 3,200
- ReLU-1 [[1, 64, 48, 48]] [1, 64, 48, 48] 0
- MaxPool2D-1 [[1, 64, 48, 48]] [1, 64, 24, 24] 0
- Conv2D-2 [[1, 64, 24, 24]] [1, 64, 24, 24] 4,160
- ReLU-2 [[1, 64, 24, 24]] [1, 64, 24, 24] 0
- Conv2D-3 [[1, 64, 24, 24]] [1, 192, 24, 24] 110,784
- MaxPool2D-2 [[1, 192, 24, 24]] [1, 192, 12, 12] 0
- Conv2D-4 [[1, 192, 12, 12]] [1, 64, 12, 12] 12,352
- Conv2D-5 [[1, 192, 12, 12]] [1, 96, 12, 12] 18,528
- Conv2D-6 [[1, 96, 12, 12]] [1, 128, 12, 12] 110,720
- Conv2D-7 [[1, 192, 12, 12]] [1, 16, 12, 12] 3,088
- Conv2D-8 [[1, 16, 12, 12]] [1, 32, 12, 12] 12,832
- MaxPool2D-3 [[1, 192, 12, 12]] [1, 192, 12, 12] 0
- Conv2D-9 [[1, 192, 12, 12]] [1, 32, 12, 12] 6,176
- Inception-1 [[1, 192, 12, 12]] [1, 256, 12, 12] 0
- Conv2D-10 [[1, 256, 12, 12]] [1, 128, 12, 12] 32,896
- Conv2D-11 [[1, 256, 12, 12]] [1, 128, 12, 12] 32,896
- Conv2D-12 [[1, 128, 12, 12]] [1, 192, 12, 12] 221,376
- Conv2D-13 [[1, 256, 12, 12]] [1, 32, 12, 12] 8,224
- Conv2D-14 [[1, 32, 12, 12]] [1, 96, 12, 12] 76,896
- MaxPool2D-4 [[1, 256, 12, 12]] [1, 256, 12, 12] 0
- Conv2D-15 [[1, 256, 12, 12]] [1, 64, 12, 12] 16,448
- Inception-2 [[1, 256, 12, 12]] [1, 480, 12, 12] 0
- MaxPool2D-5 [[1, 480, 12, 12]] [1, 480, 6, 6] 0
- Conv2D-16 [[1, 480, 6, 6]] [1, 192, 6, 6] 92,352
- Conv2D-17 [[1, 480, 6, 6]] [1, 96, 6, 6] 46,176
- Conv2D-18 [[1, 96, 6, 6]] [1, 208, 6, 6] 179,920
- Conv2D-19 [[1, 480, 6, 6]] [1, 16, 6, 6] 7,696
- Conv2D-20 [[1, 16, 6, 6]] [1, 48, 6, 6] 19,248
- MaxPool2D-6 [[1, 480, 6, 6]] [1, 480, 6, 6] 0
- Conv2D-21 [[1, 480, 6, 6]] [1, 64, 6, 6] 30,784
- Inception-3 [[1, 480, 6, 6]] [1, 512, 6, 6] 0
- Conv2D-22 [[1, 512, 6, 6]] [1, 160, 6, 6] 82,080
- Conv2D-23 [[1, 512, 6, 6]] [1, 112, 6, 6] 57,456
- Conv2D-24 [[1, 112, 6, 6]] [1, 224, 6, 6] 226,016
- Conv2D-25 [[1, 512, 6, 6]] [1, 24, 6, 6] 12,312
- Conv2D-26 [[1, 24, 6, 6]] [1, 64, 6, 6] 38,464
- MaxPool2D-7 [[1, 512, 6, 6]] [1, 512, 6, 6] 0
- Conv2D-27 [[1, 512, 6, 6]] [1, 64, 6, 6] 32,832
- Inception-4 [[1, 512, 6, 6]] [1, 512, 6, 6] 0
- Conv2D-28 [[1, 512, 6, 6]] [1, 128, 6, 6] 65,664
- Conv2D-29 [[1, 512, 6, 6]] [1, 128, 6, 6] 65,664
- Conv2D-30 [[1, 128, 6, 6]] [1, 256, 6, 6] 295,168
- Conv2D-31 [[1, 512, 6, 6]] [1, 24, 6, 6] 12,312
- Conv2D-32 [[1, 24, 6, 6]] [1, 64, 6, 6] 38,464
- MaxPool2D-8 [[1, 512, 6, 6]] [1, 512, 6, 6] 0
- Conv2D-33 [[1, 512, 6, 6]] [1, 64, 6, 6] 32,832
- Inception-5 [[1, 512, 6, 6]] [1, 512, 6, 6] 0
- Conv2D-34 [[1, 512, 6, 6]] [1, 112, 6, 6] 57,456
- Conv2D-35 [[1, 512, 6, 6]] [1, 144, 6, 6] 73,872
- Conv2D-36 [[1, 144, 6, 6]] [1, 288, 6, 6] 373,536
- Conv2D-37 [[1, 512, 6, 6]] [1, 32, 6, 6] 16,416
- Conv2D-38 [[1, 32, 6, 6]] [1, 64, 6, 6] 51,264
- MaxPool2D-9 [[1, 512, 6, 6]] [1, 512, 6, 6] 0
- Conv2D-39 [[1, 512, 6, 6]] [1, 64, 6, 6] 32,832
- Inception-6 [[1, 512, 6, 6]] [1, 528, 6, 6] 0
- Conv2D-40 [[1, 528, 6, 6]] [1, 256, 6, 6] 135,424
- Conv2D-41 [[1, 528, 6, 6]] [1, 160, 6, 6] 84,640
- Conv2D-42 [[1, 160, 6, 6]] [1, 320, 6, 6] 461,120
- Conv2D-43 [[1, 528, 6, 6]] [1, 32, 6, 6] 16,928
- Conv2D-44 [[1, 32, 6, 6]] [1, 128, 6, 6] 102,528
- MaxPool2D-10 [[1, 528, 6, 6]] [1, 528, 6, 6] 0
- Conv2D-45 [[1, 528, 6, 6]] [1, 128, 6, 6] 67,712
- Inception-7 [[1, 528, 6, 6]] [1, 832, 6, 6] 0
- MaxPool2D-11 [[1, 832, 6, 6]] [1, 832, 3, 3] 0
- Conv2D-46 [[1, 832, 3, 3]] [1, 256, 3, 3] 213,248
- Conv2D-47 [[1, 832, 3, 3]] [1, 160, 3, 3] 133,280
- Conv2D-48 [[1, 160, 3, 3]] [1, 320, 3, 3] 461,120
- Conv2D-49 [[1, 832, 3, 3]] [1, 32, 3, 3] 26,656
- Conv2D-50 [[1, 32, 3, 3]] [1, 128, 3, 3] 102,528
- MaxPool2D-12 [[1, 832, 3, 3]] [1, 832, 3, 3] 0
- Conv2D-51 [[1, 832, 3, 3]] [1, 128, 3, 3] 106,624
- Inception-8 [[1, 832, 3, 3]] [1, 832, 3, 3] 0
- Conv2D-52 [[1, 832, 3, 3]] [1, 384, 3, 3] 319,872
- Conv2D-53 [[1, 832, 3, 3]] [1, 192, 3, 3] 159,936
- Conv2D-54 [[1, 192, 3, 3]] [1, 384, 3, 3] 663,936
- Conv2D-55 [[1, 832, 3, 3]] [1, 48, 3, 3] 39,984
- Conv2D-56 [[1, 48, 3, 3]] [1, 128, 3, 3] 153,728
- MaxPool2D-13 [[1, 832, 3, 3]] [1, 832, 3, 3] 0
- Conv2D-57 [[1, 832, 3, 3]] [1, 128, 3, 3] 106,624
- Inception-9 [[1, 832, 3, 3]] [1, 1024, 3, 3] 0
- AdaptiveAvgPool2D-1 [[1, 1024, 3, 3]] [1, 1024, 1, 1] 0
- Flatten-1 [[1, 1024, 1, 1]] [1, 1024] 0
- Linear-1 [[1, 1024]] [1, 10] 10,250
+ Conv2D-1 [[1, 1, 96, 96]] [1, 64, 48, 48] 3,200
+ ReLU-1 [[1, 64, 48, 48]] [1, 64, 48, 48] 0
+ MaxPool2D-1 [[1, 64, 48, 48]] [1, 64, 24, 24] 0
+ Conv2D-2 [[1, 64, 24, 24]] [1, 64, 24, 24] 4,160
+ ReLU-2 [[1, 64, 24, 24]] [1, 64, 24, 24] 0
+ Conv2D-3 [[1, 64, 24, 24]] [1, 192, 24, 24] 110,784
+ MaxPool2D-2 [[1, 192, 24, 24]] [1, 192, 12, 12] 0
+ Conv2D-4 [[1, 192, 12, 12]] [1, 64, 12, 12] 12,352
+ Conv2D-5 [[1, 192, 12, 12]] [1, 96, 12, 12] 18,528
+ Conv2D-6 [[1, 96, 12, 12]] [1, 128, 12, 12] 110,720
+ Conv2D-7 [[1, 192, 12, 12]] [1, 16, 12, 12] 3,088
+ Conv2D-8 [[1, 16, 12, 12]] [1, 32, 12, 12] 12,832
+ MaxPool2D-3 [[1, 192, 12, 12]] [1, 192, 12, 12] 0
+ Conv2D-9 [[1, 192, 12, 12]] [1, 32, 12, 12] 6,176
+ Inception-1 [[1, 192, 12, 12]] [1, 256, 12, 12] 0
+ Conv2D-10 [[1, 256, 12, 12]] [1, 128, 12, 12] 32,896
+ Conv2D-11 [[1, 256, 12, 12]] [1, 128, 12, 12] 32,896
+ Conv2D-12 [[1, 128, 12, 12]] [1, 192, 12, 12] 221,376
+ Conv2D-13 [[1, 256, 12, 12]] [1, 32, 12, 12] 8,224
+ Conv2D-14 [[1, 32, 12, 12]] [1, 96, 12, 12] 76,896
+ MaxPool2D-4 [[1, 256, 12, 12]] [1, 256, 12, 12] 0
+ Conv2D-15 [[1, 256, 12, 12]] [1, 64, 12, 12] 16,448
+ Inception-2 [[1, 256, 12, 12]] [1, 480, 12, 12] 0
+ MaxPool2D-5 [[1, 480, 12, 12]] [1, 480, 6, 6] 0
+ Conv2D-16 [[1, 480, 6, 6]] [1, 192, 6, 6] 92,352
+ Conv2D-17 [[1, 480, 6, 6]] [1, 96, 6, 6] 46,176
+ Conv2D-18 [[1, 96, 6, 6]] [1, 208, 6, 6] 179,920
+ Conv2D-19 [[1, 480, 6, 6]] [1, 16, 6, 6] 7,696
+ Conv2D-20 [[1, 16, 6, 6]] [1, 48, 6, 6] 19,248
+ MaxPool2D-6 [[1, 480, 6, 6]] [1, 480, 6, 6] 0
+ Conv2D-21 [[1, 480, 6, 6]] [1, 64, 6, 6] 30,784
+ Inception-3 [[1, 480, 6, 6]] [1, 512, 6, 6] 0
+ Conv2D-22 [[1, 512, 6, 6]] [1, 160, 6, 6] 82,080
+ Conv2D-23 [[1, 512, 6, 6]] [1, 112, 6, 6] 57,456
+ Conv2D-24 [[1, 112, 6, 6]] [1, 224, 6, 6] 226,016
+ Conv2D-25 [[1, 512, 6, 6]] [1, 24, 6, 6] 12,312
+ Conv2D-26 [[1, 24, 6, 6]] [1, 64, 6, 6] 38,464
+ MaxPool2D-7 [[1, 512, 6, 6]] [1, 512, 6, 6] 0
+ Conv2D-27 [[1, 512, 6, 6]] [1, 64, 6, 6] 32,832
+ Inception-4 [[1, 512, 6, 6]] [1, 512, 6, 6] 0
+ Conv2D-28 [[1, 512, 6, 6]] [1, 128, 6, 6] 65,664
+ Conv2D-29 [[1, 512, 6, 6]] [1, 128, 6, 6] 65,664
+ Conv2D-30 [[1, 128, 6, 6]] [1, 256, 6, 6] 295,168
+ Conv2D-31 [[1, 512, 6, 6]] [1, 24, 6, 6] 12,312
+ Conv2D-32 [[1, 24, 6, 6]] [1, 64, 6, 6] 38,464
+ MaxPool2D-8 [[1, 512, 6, 6]] [1, 512, 6, 6] 0
+ Conv2D-33 [[1, 512, 6, 6]] [1, 64, 6, 6] 32,832
+ Inception-5 [[1, 512, 6, 6]] [1, 512, 6, 6] 0
+ Conv2D-34 [[1, 512, 6, 6]] [1, 112, 6, 6] 57,456
+ Conv2D-35 [[1, 512, 6, 6]] [1, 144, 6, 6] 73,872
+ Conv2D-36 [[1, 144, 6, 6]] [1, 288, 6, 6] 373,536
+ Conv2D-37 [[1, 512, 6, 6]] [1, 32, 6, 6] 16,416
+ Conv2D-38 [[1, 32, 6, 6]] [1, 64, 6, 6] 51,264
+ MaxPool2D-9 [[1, 512, 6, 6]] [1, 512, 6, 6] 0
+ Conv2D-39 [[1, 512, 6, 6]] [1, 64, 6, 6] 32,832
+ Inception-6 [[1, 512, 6, 6]] [1, 528, 6, 6] 0
+ Conv2D-40 [[1, 528, 6, 6]] [1, 256, 6, 6] 135,424
+ Conv2D-41 [[1, 528, 6, 6]] [1, 160, 6, 6] 84,640
+ Conv2D-42 [[1, 160, 6, 6]] [1, 320, 6, 6] 461,120
+ Conv2D-43 [[1, 528, 6, 6]] [1, 32, 6, 6] 16,928
+ Conv2D-44 [[1, 32, 6, 6]] [1, 128, 6, 6] 102,528
+ MaxPool2D-10 [[1, 528, 6, 6]] [1, 528, 6, 6] 0
+ Conv2D-45 [[1, 528, 6, 6]] [1, 128, 6, 6] 67,712
+ Inception-7 [[1, 528, 6, 6]] [1, 832, 6, 6] 0
+ MaxPool2D-11 [[1, 832, 6, 6]] [1, 832, 3, 3] 0
+ Conv2D-46 [[1, 832, 3, 3]] [1, 256, 3, 3] 213,248
+ Conv2D-47 [[1, 832, 3, 3]] [1, 160, 3, 3] 133,280
+ Conv2D-48 [[1, 160, 3, 3]] [1, 320, 3, 3] 461,120
+ Conv2D-49 [[1, 832, 3, 3]] [1, 32, 3, 3] 26,656
+ Conv2D-50 [[1, 32, 3, 3]] [1, 128, 3, 3] 102,528
+ MaxPool2D-12 [[1, 832, 3, 3]] [1, 832, 3, 3] 0
+ Conv2D-51 [[1, 832, 3, 3]] [1, 128, 3, 3] 106,624
+ Inception-8 [[1, 832, 3, 3]] [1, 832, 3, 3] 0
+ Conv2D-52 [[1, 832, 3, 3]] [1, 384, 3, 3] 319,872
+ Conv2D-53 [[1, 832, 3, 3]] [1, 192, 3, 3] 159,936
+ Conv2D-54 [[1, 192, 3, 3]] [1, 384, 3, 3] 663,936
+ Conv2D-55 [[1, 832, 3, 3]] [1, 48, 3, 3] 39,984
+ Conv2D-56 [[1, 48, 3, 3]] [1, 128, 3, 3] 153,728
+ MaxPool2D-13 [[1, 832, 3, 3]] [1, 832, 3, 3] 0
+ Conv2D-57 [[1, 832, 3, 3]] [1, 128, 3, 3] 106,624
+ Inception-9 [[1, 832, 3, 3]] [1, 1024, 3, 3] 0
+ AdaptiveAvgPool2D-1 [[1, 1024, 3, 3]] [1, 1024, 1, 1] 0
+ Flatten-1 [[1, 1024, 1, 1]] [1, 1024] 0
+ Linear-1 [[1, 1024]] [1, 10] 10,250
===============================================================================
Total params: 5,977,530
Trainable params: 5,977,530
@@ -239,7 +239,7 @@ print(paddle.summary(GoogLeNet, (1, 1, 96, 96)))
Params size (MB): 22.80
Estimated Total Size (MB): 32.50
-------------------------------------------------------------------------------
-
+
{'total_params': 5977530, 'trainable_params': 5977530}
@@ -369,4 +369,3 @@ model.fit(train_dataset, val_dataset, epochs=num_epochs, batch_size=batch_size,
[Discussions](https://discuss.d2l.ai/t/1871)
-
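In the GoogLeNet summary, the Inception rows report 0 parameters because the block itself only concatenates its four branches along the channel axis; the branch widths listed above each Inception row account for its output channels (a minimal sketch, not part of the diff):

```python
# Inception-1's output width is the sum of its four branch outputs
# (Conv2D-4, Conv2D-6, Conv2D-8, Conv2D-9 in the summary above).
branches = [64, 128, 32, 32]
print(sum(branches))  # 256 -> Inception-1's output channels
```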
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.4_GoogLeNet.ipynb b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.4_GoogLeNet.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.5.md b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.5.md
old mode 100644
new mode 100755
index f1dd46341..a12bfe1cd
--- a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.5.md
+++ b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.5.md
@@ -368,4 +368,3 @@ Ali Rahimi 在接受 2017 年 NeurIPS 大会的“接受时间考验奖”(Tes
[Discussions](https://discuss.d2l.ai/t/1874)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.5_BatchNorm.ipynb b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.5_BatchNorm.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.6.md b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.6.md
old mode 100644
new mode 100755
index 5cf8ab290..be328de23
--- a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.6.md
+++ b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.6.md
@@ -211,58 +211,58 @@ print(paddle.summary(ResNet, (1, 1, 224, 224)))
```
-------------------------------------------------------------------------------
- Layer (type) Input Shape Output Shape Param #
+ Layer (type) Input Shape Output Shape Param #
===============================================================================
- Conv2D-6 [[1, 1, 224, 224]] [1, 64, 112, 112] 3,200
- BatchNorm2D-5 [[1, 64, 112, 112]] [1, 64, 112, 112] 256
- ReLU-3 [[1, 64, 112, 112]] [1, 64, 112, 112] 0
- MaxPool2D-1 [[1, 64, 112, 112]] [1, 64, 56, 56] 0
- Conv2D-7 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,928
- BatchNorm2D-6 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
- Conv2D-8 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,928
- BatchNorm2D-7 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
- Residual-3 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
- Conv2D-9 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,928
- BatchNorm2D-8 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
- Conv2D-10 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,928
- BatchNorm2D-9 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
- Residual-4 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
- Conv2D-11 [[1, 64, 56, 56]] [1, 128, 28, 28] 73,856
- BatchNorm2D-10 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
- Conv2D-12 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,584
- BatchNorm2D-11 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
- Conv2D-13 [[1, 64, 56, 56]] [1, 128, 28, 28] 8,320
- Residual-5 [[1, 64, 56, 56]] [1, 128, 28, 28] 0
- Conv2D-14 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,584
- BatchNorm2D-12 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
- Conv2D-15 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,584
- BatchNorm2D-13 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
- Residual-6 [[1, 128, 28, 28]] [1, 128, 28, 28] 0
- Conv2D-16 [[1, 128, 28, 28]] [1, 256, 14, 14] 295,168
- BatchNorm2D-14 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
- Conv2D-17 [[1, 256, 14, 14]] [1, 256, 14, 14] 590,080
- BatchNorm2D-15 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
- Conv2D-18 [[1, 128, 28, 28]] [1, 256, 14, 14] 33,024
- Residual-7 [[1, 128, 28, 28]] [1, 256, 14, 14] 0
- Conv2D-19 [[1, 256, 14, 14]] [1, 256, 14, 14] 590,080
- BatchNorm2D-16 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
- Conv2D-20 [[1, 256, 14, 14]] [1, 256, 14, 14] 590,080
- BatchNorm2D-17 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
- Residual-8 [[1, 256, 14, 14]] [1, 256, 14, 14] 0
- Conv2D-21 [[1, 256, 14, 14]] [1, 512, 7, 7] 1,180,160
- BatchNorm2D-18 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
- Conv2D-22 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,359,808
- BatchNorm2D-19 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
- Conv2D-23 [[1, 256, 14, 14]] [1, 512, 7, 7] 131,584
- Residual-9 [[1, 256, 14, 14]] [1, 512, 7, 7] 0
- Conv2D-24 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,359,808
- BatchNorm2D-20 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
- Conv2D-25 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,359,808
- BatchNorm2D-21 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
- Residual-10 [[1, 512, 7, 7]] [1, 512, 7, 7] 0
- AdaptiveAvgPool2D-1 [[1, 512, 7, 7]] [1, 512, 1, 1] 0
- Flatten-1 [[1, 512, 1, 1]] [1, 512] 0
- Linear-1 [[1, 512]] [1, 10] 5,130
+ Conv2D-6 [[1, 1, 224, 224]] [1, 64, 112, 112] 3,200
+ BatchNorm2D-5 [[1, 64, 112, 112]] [1, 64, 112, 112] 256
+ ReLU-3 [[1, 64, 112, 112]] [1, 64, 112, 112] 0
+ MaxPool2D-1 [[1, 64, 112, 112]] [1, 64, 56, 56] 0
+ Conv2D-7 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,928
+ BatchNorm2D-6 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
+ Conv2D-8 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,928
+ BatchNorm2D-7 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
+ Residual-3 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
+ Conv2D-9 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,928
+ BatchNorm2D-8 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
+ Conv2D-10 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,928
+ BatchNorm2D-9 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
+ Residual-4 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
+ Conv2D-11 [[1, 64, 56, 56]] [1, 128, 28, 28] 73,856
+ BatchNorm2D-10 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
+ Conv2D-12 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,584
+ BatchNorm2D-11 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
+ Conv2D-13 [[1, 64, 56, 56]] [1, 128, 28, 28] 8,320
+ Residual-5 [[1, 64, 56, 56]] [1, 128, 28, 28] 0
+ Conv2D-14 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,584
+ BatchNorm2D-12 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
+ Conv2D-15 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,584
+ BatchNorm2D-13 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
+ Residual-6 [[1, 128, 28, 28]] [1, 128, 28, 28] 0
+ Conv2D-16 [[1, 128, 28, 28]] [1, 256, 14, 14] 295,168
+ BatchNorm2D-14 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
+ Conv2D-17 [[1, 256, 14, 14]] [1, 256, 14, 14] 590,080
+ BatchNorm2D-15 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
+ Conv2D-18 [[1, 128, 28, 28]] [1, 256, 14, 14] 33,024
+ Residual-7 [[1, 128, 28, 28]] [1, 256, 14, 14] 0
+ Conv2D-19 [[1, 256, 14, 14]] [1, 256, 14, 14] 590,080
+ BatchNorm2D-16 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
+ Conv2D-20 [[1, 256, 14, 14]] [1, 256, 14, 14] 590,080
+ BatchNorm2D-17 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
+ Residual-8 [[1, 256, 14, 14]] [1, 256, 14, 14] 0
+ Conv2D-21 [[1, 256, 14, 14]] [1, 512, 7, 7] 1,180,160
+ BatchNorm2D-18 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
+ Conv2D-22 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,359,808
+ BatchNorm2D-19 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
+ Conv2D-23 [[1, 256, 14, 14]] [1, 512, 7, 7] 131,584
+ Residual-9 [[1, 256, 14, 14]] [1, 512, 7, 7] 0
+ Conv2D-24 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,359,808
+ BatchNorm2D-20 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
+ Conv2D-25 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,359,808
+ BatchNorm2D-21 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
+ Residual-10 [[1, 512, 7, 7]] [1, 512, 7, 7] 0
+ AdaptiveAvgPool2D-1 [[1, 512, 7, 7]] [1, 512, 1, 1] 0
+ Flatten-1 [[1, 512, 1, 1]] [1, 512] 0
+ Linear-1 [[1, 512]] [1, 10] 5,130
===============================================================================
Total params: 11,186,186
Trainable params: 11,170,570
@@ -273,7 +273,7 @@ print(paddle.summary(ResNet, (1, 1, 224, 224)))
Params size (MB): 42.67
Estimated Total Size (MB): 92.83
-------------------------------------------------------------------------------
-
+
{'total_params': 11186186, 'trainable_params': 11170570}
@@ -342,4 +342,3 @@ model.fit(train_dataset, val_dataset, epochs=num_epochs, batch_size=batch_size,
[Discussions](https://discuss.d2l.ai/t/1877)
-
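For the ResNet summary, the small Conv2D rows inside the downsampling Residual blocks are the 1x1 shortcut convolutions that match channel counts across the block (a minimal sketch, not part of the diff):

```python
# 1x1 shortcut convolutions in the downsampling Residual blocks:
# params = in_channels * out_channels * 1 * 1 + out_channels.
shortcut_64_to_128 = 64 * 128 + 128    # 8,320   -> Conv2D-13
shortcut_256_to_512 = 256 * 512 + 512  # 131,584 -> Conv2D-23
print(shortcut_64_to_128, shortcut_256_to_512)
```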
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.6_ResNet.ipynb b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.6_ResNet.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.7.md b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.7.md
old mode 100644
new mode 100755
index 2994c4343..aa7c895c3
--- a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.7.md
+++ b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.7.md
@@ -212,19 +212,19 @@ model.fit(train_dataset, val_dataset, epochs=num_epochs, batch_size=batch_size,
```
- Cache file /home/aistudio/.cache/paddle/dataset/fashion-mnist/train-images-idx3-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/fashion_mnist/train-images-idx3-ubyte.gz
+ Cache file /home/aistudio/.cache/paddle/dataset/fashion-mnist/train-images-idx3-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/fashion_mnist/train-images-idx3-ubyte.gz
Begin to download
-
+
Download finished
- Cache file /home/aistudio/.cache/paddle/dataset/fashion-mnist/train-labels-idx1-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/fashion_mnist/train-labels-idx1-ubyte.gz
+ Cache file /home/aistudio/.cache/paddle/dataset/fashion-mnist/train-labels-idx1-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/fashion_mnist/train-labels-idx1-ubyte.gz
Begin to download
........
Download finished
- Cache file /home/aistudio/.cache/paddle/dataset/fashion-mnist/t10k-images-idx3-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/fashion_mnist/t10k-images-idx3-ubyte.gz
+ Cache file /home/aistudio/.cache/paddle/dataset/fashion-mnist/t10k-images-idx3-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/fashion_mnist/t10k-images-idx3-ubyte.gz
Begin to download
-
+
Download finished
- Cache file /home/aistudio/.cache/paddle/dataset/fashion-mnist/t10k-labels-idx1-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/fashion_mnist/t10k-labels-idx1-ubyte.gz
+ Cache file /home/aistudio/.cache/paddle/dataset/fashion-mnist/t10k-labels-idx1-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/fashion_mnist/t10k-labels-idx1-ubyte.gz
Begin to download
..
Download finished
@@ -273,4 +273,3 @@ model.fit(train_dataset, val_dataset, epochs=num_epochs, batch_size=batch_size,
[Discussions](https://discuss.d2l.ai/t/1880)
-
diff --git a/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.7_DenseNet.ipynb b/Dive-into-DL-paddlepaddle/docs/7_convolutional-modern/7.7_DenseNet.ipynb
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/d2l/paddle.py b/Dive-into-DL-paddlepaddle/docs/d2l/paddle.py
old mode 100644
new mode 100755
index 3fa62206b..61e38c11d
--- a/Dive-into-DL-paddlepaddle/docs/d2l/paddle.py
+++ b/Dive-into-DL-paddlepaddle/docs/d2l/paddle.py
@@ -28,14 +28,16 @@
from paddle import nn
from paddle.nn import functional as F
from paddle.vision import transforms
-
"""2.4"""
+
+
def use_svg_display():
"""使用svg格式在Jupyter中显示绘图
Defined in :numref:`sec_calculus`"""
display.set_matplotlib_formats('svg')
+
def set_figsize(figsize=(3.5, 2.5)):
"""设置matplotlib的图表大小
@@ -43,6 +45,7 @@ def set_figsize(figsize=(3.5, 2.5)):
use_svg_display()
d2l.plt.rcParams['figure.figsize'] = figsize
+
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
"""设置matplotlib的轴
@@ -57,9 +60,19 @@ def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
axes.legend(legend)
axes.grid()
-def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
- ylim=None, xscale='linear', yscale='linear',
- fmts=('-', 'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):
+
+def plot(X,
+ Y=None,
+ xlabel=None,
+ ylabel=None,
+ legend=None,
+ xlim=None,
+ ylim=None,
+ xscale='linear',
+ yscale='linear',
+ fmts=('-', 'm--', 'g-.', 'r:'),
+ figsize=(3.5, 2.5),
+ axes=None):
"""绘制数据点
Defined in :numref:`sec_calculus`"""
@@ -71,8 +84,8 @@ def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
    # Return True if X has one axis
def has_one_axis(X):
- return (hasattr(X, "ndim") and X.ndim == 1 or isinstance(X, list)
- and not hasattr(X[0], "__len__"))
+ return (hasattr(X, "ndim") and X.ndim == 1 or isinstance(X, list) and
+ not hasattr(X[0], "__len__"))
if has_one_axis(X):
X = [X]
@@ -88,11 +101,16 @@ def has_one_axis(X):
axes.plot(x, y, fmt)
else:
axes.plot(y, fmt)
- set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)# Alias defined in config.ini
+ set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale,
+ legend) # Alias defined in config.ini
+
"""3.1"""
+
+
class Timer: #@save
"""记录多次运行时间"""
+
def __init__(self):
self.times = []
self.start()
@@ -118,7 +136,10 @@ def cumsum(self):
"""返回累计时间"""
return np.array(self.times).cumsum().tolist()
+
"""3.2"""
+
+
def synthetic_data(w, b, num_examples): #@save
"""生成y=Xw+b+噪声"""
X = paddle.normal(0, 1, (num_examples, len(w)))
@@ -126,36 +147,48 @@ def synthetic_data(w, b, num_examples): #@save
y += paddle.normal(0, 0.01, y.shape)
return X, y.reshape((-1, 1))
+
def linreg(X, w, b): #@save
"""线性回归模型"""
return paddle.matmul(X, w) + b
+
def squared_loss(y_hat, y):
"""均方损失。"""
return (y_hat - y.reshape(y_hat.shape))**2 / 2
+
def sgd(params, lr, batch_size): #@save
"""小批量随机梯度下降"""
- a=[]
+ a = []
with paddle.no_grad():
for params in params:
- params -= lr * params.grad/ batch_size
+ params -= lr * params.grad / batch_size
a.append(params)
return a
+
"""3.3"""
+
+
def test_load_array(data_arrays, batch_size, is_train=True):
"""构造一个Paddle数据迭代器。"""
dataset = TensorDataset(data_arrays)
return DataLoader(dataset, batch_size, shuffle=is_train)
+
"""3.5"""
+
+
def get_fashion_mnist_labels(labels): #@save
"""返回Fashion-MNIST数据集的文本标签"""
- text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
- 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
+ text_labels = [
+ 't-shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt',
+ 'sneaker', 'bag', 'ankle boot'
+ ]
return [text_labels[int(i)] for i in labels]
+
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
"""Plot a list of images."""
figsize = (num_cols * scale, num_rows * scale)
@@ -174,28 +207,36 @@ def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
ax.set_title(titles[i])
return axes
+
def get_dataloader_workers():
"""使用4个进程来读取数据。"""
return 4
+
def load_data_fashion_mnist(batch_size, resize=None): #@save
"""下载Fashion-MNIST数据集,然后将其加载到内存中"""
trans = [T.ToTensor()]
if resize:
trans.insert(0, T.Resize(resize))
trans = T.Compose(trans)
- mnist_train = paddle.vision.datasets.FashionMNIST(mode="train",transform=trans)
- mnist_test = paddle.vision.datasets.FashionMNIST(mode="test",transform=trans)
- return (paddle.io.DataLoader(dataset=mnist_train,
- batch_size=batch_size,
- shuffle=True,
- num_workers=get_dataloader_workers()),
- paddle.io.DataLoader(dataset=mnist_test,
- batch_size=batch_size,
- shuffle=True,
- num_workers=get_dataloader_workers()))
+ mnist_train = paddle.vision.datasets.FashionMNIST(
+ mode="train", transform=trans)
+ mnist_test = paddle.vision.datasets.FashionMNIST(
+ mode="test", transform=trans)
+ return (paddle.io.DataLoader(
+ dataset=mnist_train,
+ batch_size=batch_size,
+ shuffle=True,
+ num_workers=get_dataloader_workers()), paddle.io.DataLoader(
+ dataset=mnist_test,
+ batch_size=batch_size,
+ shuffle=True,
+ num_workers=get_dataloader_workers()))
+
"""3.6"""
+
+
def accuracy(y_hat, y): #@save
"""计算预测正确的数量"""
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
@@ -203,6 +244,7 @@ def accuracy(y_hat, y): #@save
cmp = y_hat.astype(y.dtype) == y
return float(cmp.astype(y.dtype).sum())
+
def evaluate_accuracy(net, data_iter): #@save
"""计算在指定数据集上模型的精度"""
if isinstance(net, paddle.nn.Layer):
@@ -213,8 +255,10 @@ def evaluate_accuracy(net, data_iter): #@save
metric.add(accuracy(net(X), y), y.numel())
return metric[0] / metric[1]
+
class Accumulator: #@save
"""在n个变量上累加"""
+
def __init__(self, n):
self.data = [0.0] * n
@@ -227,6 +271,7 @@ def reset(self):
def __getitem__(self, idx):
return self.data[idx]
+
def train_epoch_ch3(net, train_iter, loss, updater): #@save
"""训练模型一个迭代周期(定义见第3章)"""
# 将模型设置为训练模式
@@ -255,11 +300,21 @@ def train_epoch_ch3(net, train_iter, loss, updater): #@save
metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
return metric[0] / metric[2], metric[1] / metric[2]
+
class Animator: #@save
"""在动画中绘制数据"""
- def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
- ylim=None, xscale='linear', yscale='linear',
- fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
+
+ def __init__(self,
+ xlabel=None,
+ ylabel=None,
+ legend=None,
+ xlim=None,
+ ylim=None,
+ xscale='linear',
+ yscale='linear',
+ fmts=('-', 'm--', 'g-.', 'r:'),
+ nrows=1,
+ ncols=1,
figsize=(3.5, 2.5)):
        # Incrementally plot multiple lines
if legend is None:
@@ -269,8 +324,7 @@ def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
if nrows * ncols == 1:
self.axes = [self.axes, ]
        # Use a lambda function to capture the arguments
- self.config_axes = lambda: d2l.set_axes(
- self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
+ self.config_axes = lambda: d2l.set_axes(self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
self.X, self.Y, self.fmts = None, None, fmts
def add(self, x, y):
@@ -295,30 +349,37 @@ def add(self, x, y):
display.display(self.fig)
display.clear_output(wait=True)
+
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater): #@save
"""训练模型(定义见第3章)"""
- animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
- legend=['train loss', 'train acc', 'test acc'])
+ animator = Animator(
+ xlabel='epoch',
+ xlim=[1, num_epochs],
+ ylim=[0.3, 0.9],
+ legend=['train loss', 'train acc', 'test acc'])
for epoch in range(num_epochs):
train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
test_acc = evaluate_accuracy(net, test_iter)
- animator.add(epoch + 1, train_metrics + (test_acc,))
+ animator.add(epoch + 1, train_metrics + (test_acc, ))
train_loss, train_acc = train_metrics
assert train_loss < 0.5, train_loss
assert train_acc <= 1 and train_acc > 0.7, train_acc
assert test_acc <= 1 and test_acc > 0.7, test_acc
+
def predict_ch3(net, test_iter, n=6): #@save
"""预测标签(定义见第3章)"""
for X, y in test_iter:
break
trues = d2l.get_fashion_mnist_labels(y)
preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))
- titles = [true +'\n' + pred for true, pred in zip(trues, preds)]
- d2l.show_images(
- X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])
+ titles = [true + '\n' + pred for true, pred in zip(trues, preds)]
+ d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])
+
"""4.4"""
+
+
def evaluate_loss(net, data_iter, loss): #@save
"""评估给定数据集上模型的损失。"""
metric = d2l.Accumulator(2) # 损失的总和, 样本数量
@@ -329,10 +390,12 @@ def evaluate_loss(net, data_iter, loss): #@save
metric.add(l.sum(), l.numel())
return metric[0] / metric[1]
+
"""4.10"""
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
+
def download(name, cache_dir=os.path.join('..', 'data')): #@save
"""下载一个DATA_HUB中的文件,返回本地文件名"""
assert name in DATA_HUB, f"{name} 不存在于 {DATA_HUB}"
@@ -355,6 +418,7 @@ def download(name, cache_dir=os.path.join('..', 'data')): #@save
f.write(r.content)
return fname
+
def download_extract(name, folder=None): #@save
"""下载并解压zip/tar文件"""
fname = download(name)
@@ -369,11 +433,13 @@ def download_extract(name, folder=None): #@save
fp.extractall(base_dir)
return os.path.join(base_dir, folder) if folder else data_dir
+
def download_all(): #@save
"""下载DATA_HUB中的所有文件"""
for name in DATA_HUB:
download(name)
+
DATA_HUB['kaggle_house_train'] = ( #@save
DATA_URL + 'kaggle_house_pred_train.csv',
'585e9cc93e70b39160e7921475f9bcd7d31219ce')
@@ -381,24 +447,29 @@ def download_all(): #@save
DATA_HUB['kaggle_house_test'] = ( #@save
DATA_URL + 'kaggle_house_pred_test.csv',
'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
-
"""5.6"""
+
+
def try_gpu(i=0): #@save
"""如果存在,则返回gpu(i),否则返回cpu()。"""
- if len(paddle.static.cuda_places())>= i + 1:
+ if len(paddle.static.cuda_places()) >= i + 1:
return paddle.device.get_device()
return paddle.device.set_device("cpu")
+
def try_all_gpus(): #@save
"""返回所有可用的GPU,如果没有GPU,则返回[cpu(),]。"""
- devices = [paddle.device.get_device()
- for i in range(len(paddle.static.cuda_places()))
- ]
+ devices = [
+ paddle.device.get_device()
+ for i in range(len(paddle.static.cuda_places()))
+ ]
return devices if devices else paddle.device.get_device()
"""6.2"""
-def corr2d(X, K):
+
+
+def corr2d(X, K):
"""计算二维互相关运算。"""
h, w = K.shape
Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
@@ -407,25 +478,32 @@ def corr2d(X, K):
Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
return Y
+
"""6.6"""
+
+
def evaluate_accuracy(data_iter, net):
acc_sum, n = 0.0, 0
with paddle.no_grad():
for X, y in data_iter:
if isinstance(net, nn.Layer):
-            net.eval() # evaluation mode; this disables dropout
-            acc_sum += (net(X).argmax(axis=1) == y.flatten()).astype('float32').sum().numpy()[0]
-            net.train() # switch back to training mode
-        else: # a custom model; not used after Section 3.13, GPU not considered
-            if('is_training' in net.__code__.co_varnames): # if it has the is_training argument
+                net.eval()  # evaluation mode; this disables dropout
+                acc_sum += (net(X).argmax(axis=1) == y.flatten()
+                            ).astype('float32').sum().numpy()[0]
+                net.train()  # switch back to training mode
+            else:  # a custom model; not used after Section 3.13, GPU not considered
+                if ('is_training' in
+                        net.__code__.co_varnames):  # if it has the is_training argument
                # set is_training to False
- acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
+ acc_sum += (net(X, is_training=False).argmax(dim=1) == y
+ ).float().sum().item()
else:
- acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
+ acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
n += y.shape[0]
return acc_sum / n
+
def train_ch6(net, train_iter, test_iter, batch_size, optimi, num_epochs):
loss = nn.CrossEntropyLoss()
@@ -439,25 +517,38 @@ def train_ch6(net, train_iter, test_iter, batch_size, optimi, num_epochs):
l.backward()
optimi.step()
train_l_sum += l.numpy()[0]
- train_acc_sum += (y_hat.argmax(axis=1) == y.flatten()).astype('float32').sum().numpy()[0]
+ train_acc_sum += (y_hat.argmax(axis=1) == y.flatten()
+ ).astype('float32').sum().numpy()[0]
n += y.shape[0]
batch_count += 1
test_acc = evaluate_accuracy(test_iter, net)
- print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
- % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))
+ print(
+ 'epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
+ % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n,
+ test_acc, time.time() - start))
+
"""7.6"""
+
+
class Residual(nn.Layer):
- def __init__(self, input_channels, num_channels, use_1x1conv=False,
+ def __init__(self,
+ input_channels,
+ num_channels,
+ use_1x1conv=False,
strides=1):
super(Residual, self).__init__()
- self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3,
- padding=1, stride=strides)
- self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3,
- padding=1)
+ self.conv1 = nn.Conv2D(
+ input_channels,
+ num_channels,
+ kernel_size=3,
+ padding=1,
+ stride=strides)
+ self.conv2 = nn.Conv2D(
+ num_channels, num_channels, kernel_size=3, padding=1)
if use_1x1conv:
- self.conv3 = nn.Conv2D(input_channels, num_channels,
- kernel_size=1, stride=strides)
+ self.conv3 = nn.Conv2D(
+ input_channels, num_channels, kernel_size=1, stride=strides)
else:
self.conv3 = None
self.bn1 = nn.BatchNorm2D(num_channels)
@@ -473,7 +564,6 @@ def forward(self, X):
return F.relu(Y)
-
ones = paddle.ones
zeros = paddle.zeros
tensor = paddle.to_tensor
@@ -506,4 +596,3 @@ def forward(self, X):
astype = lambda x, *args, **kwargs: x.type(*args, **kwargs)
transpose = lambda x, *args, **kwargs: x.t(*args, **kwargs)
reduce_mean = lambda x, *args, **kwargs: x.mean(*args, **kwargs)
-
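The reformatting of d2l/paddle.py above is behavior-preserving; as a minimal usage sketch (assuming paddle is installed and the docs/d2l package is importable as `d2l.paddle`, neither of which this diff establishes), the reformatted Residual block still produces the shapes seen in the 7.6 summary:

```python
import paddle
from d2l import paddle as d2l  # assumes docs/d2l is on the Python path

# A downsampling Residual block with a 1x1-conv shortcut halves the spatial
# size and changes the channel count, matching the 7.6 summary above.
blk = d2l.Residual(64, 128, use_1x1conv=True, strides=2)
X = paddle.rand((4, 64, 56, 56))
print(blk(X).shape)  # [4, 128, 28, 28]
```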
diff --git a/Dive-into-DL-paddlepaddle/docs/data/house_tiny.csv b/Dive-into-DL-paddlepaddle/docs/data/house_tiny.csv
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/a77.svg b/Dive-into-DL-paddlepaddle/docs/img/a77.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/alexnet-original.svg b/Dive-into-DL-paddlepaddle/docs/img/alexnet-original.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/alexnet.svg b/Dive-into-DL-paddlepaddle/docs/img/alexnet.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/anchor-label.svg b/Dive-into-DL-paddlepaddle/docs/img/anchor-label.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/asyncgraph.svg b/Dive-into-DL-paddlepaddle/docs/img/asyncgraph.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/attention-output.svg b/Dive-into-DL-paddlepaddle/docs/img/attention-output.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/attention.svg b/Dive-into-DL-paddlepaddle/docs/img/attention.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/autumn-oak.jpg b/Dive-into-DL-paddlepaddle/docs/img/autumn-oak.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/aws.png b/Dive-into-DL-paddlepaddle/docs/img/aws.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/banana.jpg b/Dive-into-DL-paddlepaddle/docs/img/banana.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/beam-search.svg b/Dive-into-DL-paddlepaddle/docs/img/beam-search.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/bert-input.svg b/Dive-into-DL-paddlepaddle/docs/img/bert-input.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/bert-one-seq.svg b/Dive-into-DL-paddlepaddle/docs/img/bert-one-seq.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/bert-qa.svg b/Dive-into-DL-paddlepaddle/docs/img/bert-qa.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/bert-tagging.svg b/Dive-into-DL-paddlepaddle/docs/img/bert-tagging.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/bert-two-seqs.svg b/Dive-into-DL-paddlepaddle/docs/img/bert-two-seqs.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/birnn.svg b/Dive-into-DL-paddlepaddle/docs/img/birnn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/blocks.svg b/Dive-into-DL-paddlepaddle/docs/img/blocks.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/book-org.svg b/Dive-into-DL-paddlepaddle/docs/img/book-org.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/bw-hierarchy.svg b/Dive-into-DL-paddlepaddle/docs/img/bw-hierarchy.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/capacity-vs-error.svg b/Dive-into-DL-paddlepaddle/docs/img/capacity-vs-error.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/cat-dog-pixels.png b/Dive-into-DL-paddlepaddle/docs/img/cat-dog-pixels.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/cat-dog-test.svg b/Dive-into-DL-paddlepaddle/docs/img/cat-dog-test.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/cat-dog-train.svg b/Dive-into-DL-paddlepaddle/docs/img/cat-dog-train.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/cat1.jpg b/Dive-into-DL-paddlepaddle/docs/img/cat1.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/cat2.jpg b/Dive-into-DL-paddlepaddle/docs/img/cat2.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/cat3.jpg b/Dive-into-DL-paddlepaddle/docs/img/cat3.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/catdog.jpg b/Dive-into-DL-paddlepaddle/docs/img/catdog.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/cbow.svg b/Dive-into-DL-paddlepaddle/docs/img/cbow.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/chain-net1.svg b/Dive-into-DL-paddlepaddle/docs/img/chain-net1.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/chain-net2.svg b/Dive-into-DL-paddlepaddle/docs/img/chain-net2.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/chmod.png b/Dive-into-DL-paddlepaddle/docs/img/chmod.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/cnn-rnn-self-attention.svg b/Dive-into-DL-paddlepaddle/docs/img/cnn-rnn-self-attention.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/colab-2.png b/Dive-into-DL-paddlepaddle/docs/img/colab-2.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/colab.png b/Dive-into-DL-paddlepaddle/docs/img/colab.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/computegraph.svg b/Dive-into-DL-paddlepaddle/docs/img/computegraph.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/connect.png b/Dive-into-DL-paddlepaddle/docs/img/connect.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/contribute.svg b/Dive-into-DL-paddlepaddle/docs/img/contribute.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/conv-1x1.svg b/Dive-into-DL-paddlepaddle/docs/img/conv-1x1.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/conv-multi-in.svg b/Dive-into-DL-paddlepaddle/docs/img/conv-multi-in.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/conv-pad.svg b/Dive-into-DL-paddlepaddle/docs/img/conv-pad.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/conv-stride.svg b/Dive-into-DL-paddlepaddle/docs/img/conv-stride.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/conv1d-2d.svg b/Dive-into-DL-paddlepaddle/docs/img/conv1d-2d.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/conv1d-channel.svg b/Dive-into-DL-paddlepaddle/docs/img/conv1d-channel.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/conv1d.svg b/Dive-into-DL-paddlepaddle/docs/img/conv1d.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/convex-intersect.svg b/Dive-into-DL-paddlepaddle/docs/img/convex-intersect.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/copyto.svg b/Dive-into-DL-paddlepaddle/docs/img/copyto.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/correlation.svg b/Dive-into-DL-paddlepaddle/docs/img/correlation.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/cover.jpg b/Dive-into-DL-paddlepaddle/docs/img/cover.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/cuda101.png b/Dive-into-DL-paddlepaddle/docs/img/cuda101.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/data-collection.svg b/Dive-into-DL-paddlepaddle/docs/img/data-collection.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/data-parallel.svg b/Dive-into-DL-paddlepaddle/docs/img/data-parallel.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/death-cap.jpg b/Dive-into-DL-paddlepaddle/docs/img/death-cap.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/deep-rnn.svg b/Dive-into-DL-paddlepaddle/docs/img/deep-rnn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/deeplearning-amazon.jpg b/Dive-into-DL-paddlepaddle/docs/img/deeplearning-amazon.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/densenet-block.svg b/Dive-into-DL-paddlepaddle/docs/img/densenet-block.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/densenet.svg b/Dive-into-DL-paddlepaddle/docs/img/densenet.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/disk.png b/Dive-into-DL-paddlepaddle/docs/img/disk.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/dog1.jpg b/Dive-into-DL-paddlepaddle/docs/img/dog1.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/dog2.jpg b/Dive-into-DL-paddlepaddle/docs/img/dog2.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/dropout2.svg b/Dive-into-DL-paddlepaddle/docs/img/dropout2.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/ec2.png b/Dive-into-DL-paddlepaddle/docs/img/ec2.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/edit-file.png b/Dive-into-DL-paddlepaddle/docs/img/edit-file.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/elmo-gpt-bert.svg b/Dive-into-DL-paddlepaddle/docs/img/elmo-gpt-bert.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/encoder-decoder.svg b/Dive-into-DL-paddlepaddle/docs/img/encoder-decoder.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/eye-book.png b/Dive-into-DL-paddlepaddle/docs/img/eye-book.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/eye-book.svg b/Dive-into-DL-paddlepaddle/docs/img/eye-book.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/eye-coffee.png b/Dive-into-DL-paddlepaddle/docs/img/eye-coffee.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/eye-coffee.svg b/Dive-into-DL-paddlepaddle/docs/img/eye-coffee.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/falsesharing.svg b/Dive-into-DL-paddlepaddle/docs/img/falsesharing.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/fast-rcnn.svg b/Dive-into-DL-paddlepaddle/docs/img/fast-rcnn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/faster-rcnn.svg b/Dive-into-DL-paddlepaddle/docs/img/faster-rcnn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/fcn.svg b/Dive-into-DL-paddlepaddle/docs/img/fcn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/filters.png b/Dive-into-DL-paddlepaddle/docs/img/filters.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/finetune.svg b/Dive-into-DL-paddlepaddle/docs/img/finetune.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/fit-linreg.svg b/Dive-into-DL-paddlepaddle/docs/img/fit-linreg.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/flopsvsprice.svg b/Dive-into-DL-paddlepaddle/docs/img/flopsvsprice.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/forward.svg b/Dive-into-DL-paddlepaddle/docs/img/forward.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/frontends.png b/Dive-into-DL-paddlepaddle/docs/img/frontends.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/frontends.svg b/Dive-into-DL-paddlepaddle/docs/img/frontends.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/frontpage/jd-190715-en.png b/Dive-into-DL-paddlepaddle/docs/img/frontpage/jd-190715-en.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/frontpage/jd-190715-zh.png b/Dive-into-DL-paddlepaddle/docs/img/frontpage/jd-190715-zh.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/ftse100.png b/Dive-into-DL-paddlepaddle/docs/img/ftse100.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/functionclasses.svg b/Dive-into-DL-paddlepaddle/docs/img/functionclasses.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/gan.svg b/Dive-into-DL-paddlepaddle/docs/img/gan.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/git-clone.png b/Dive-into-DL-paddlepaddle/docs/img/git-clone.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/git-createpr.png b/Dive-into-DL-paddlepaddle/docs/img/git-createpr.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/git-fork.png b/Dive-into-DL-paddlepaddle/docs/img/git-fork.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/git-forked.png b/Dive-into-DL-paddlepaddle/docs/img/git-forked.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/git-newpr.png b/Dive-into-DL-paddlepaddle/docs/img/git-newpr.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/grid-points.svg b/Dive-into-DL-paddlepaddle/docs/img/grid-points.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/grid-transform-filled.svg b/Dive-into-DL-paddlepaddle/docs/img/grid-transform-filled.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/grid-transform.svg b/Dive-into-DL-paddlepaddle/docs/img/grid-transform.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/gru-1.svg b/Dive-into-DL-paddlepaddle/docs/img/gru-1.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/gru-2.svg b/Dive-into-DL-paddlepaddle/docs/img/gru-2.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/gru-3.svg b/Dive-into-DL-paddlepaddle/docs/img/gru-3.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/hi-softmax.svg b/Dive-into-DL-paddlepaddle/docs/img/hi-softmax.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/hmm.svg b/Dive-into-DL-paddlepaddle/docs/img/hmm.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/house-pricing.png b/Dive-into-DL-paddlepaddle/docs/img/house-pricing.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/inception-full.svg b/Dive-into-DL-paddlepaddle/docs/img/inception-full.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/inception.svg b/Dive-into-DL-paddlepaddle/docs/img/inception.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/iou.svg b/Dive-into-DL-paddlepaddle/docs/img/iou.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/jupyter.png b/Dive-into-DL-paddlepaddle/docs/img/jupyter.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/jupyter00.png b/Dive-into-DL-paddlepaddle/docs/img/jupyter00.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/jupyter01.png b/Dive-into-DL-paddlepaddle/docs/img/jupyter01.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/jupyter02.png b/Dive-into-DL-paddlepaddle/docs/img/jupyter02.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/jupyter03.png b/Dive-into-DL-paddlepaddle/docs/img/jupyter03.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/jupyter04.png b/Dive-into-DL-paddlepaddle/docs/img/jupyter04.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/jupyter05.png b/Dive-into-DL-paddlepaddle/docs/img/jupyter05.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/jupyter06.png b/Dive-into-DL-paddlepaddle/docs/img/jupyter06.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/kaggle-cifar10.png b/Dive-into-DL-paddlepaddle/docs/img/kaggle-cifar10.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/kaggle-dog.jpg b/Dive-into-DL-paddlepaddle/docs/img/kaggle-dog.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/kaggle-submit2.png b/Dive-into-DL-paddlepaddle/docs/img/kaggle-submit2.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/kaggle.png b/Dive-into-DL-paddlepaddle/docs/img/kaggle.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/keypair.png b/Dive-into-DL-paddlepaddle/docs/img/keypair.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/koebel.jpg b/Dive-into-DL-paddlepaddle/docs/img/koebel.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/latencynumbers.png b/Dive-into-DL-paddlepaddle/docs/img/latencynumbers.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/launching.png b/Dive-into-DL-paddlepaddle/docs/img/launching.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/lenet-vert.svg b/Dive-into-DL-paddlepaddle/docs/img/lenet-vert.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/lenet.svg b/Dive-into-DL-paddlepaddle/docs/img/lenet.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/limits.png b/Dive-into-DL-paddlepaddle/docs/img/limits.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/lstm-0.svg b/Dive-into-DL-paddlepaddle/docs/img/lstm-0.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/lstm-1.svg b/Dive-into-DL-paddlepaddle/docs/img/lstm-1.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/lstm-2.svg b/Dive-into-DL-paddlepaddle/docs/img/lstm-2.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/lstm-3.svg b/Dive-into-DL-paddlepaddle/docs/img/lstm-3.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/marginal.svg b/Dive-into-DL-paddlepaddle/docs/img/marginal.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/mask-rcnn.svg b/Dive-into-DL-paddlepaddle/docs/img/mask-rcnn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/miniconda.png b/Dive-into-DL-paddlepaddle/docs/img/miniconda.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/ml-loop.svg b/Dive-into-DL-paddlepaddle/docs/img/ml-loop.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/mlp.svg b/Dive-into-DL-paddlepaddle/docs/img/mlp.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/mobo-symbol.svg b/Dive-into-DL-paddlepaddle/docs/img/mobo-symbol.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/multi-head-attention.svg b/Dive-into-DL-paddlepaddle/docs/img/multi-head-attention.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/mutual-information.svg b/Dive-into-DL-paddlepaddle/docs/img/mutual-information.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/negSecDer.svg b/Dive-into-DL-paddlepaddle/docs/img/negSecDer.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/neon128.svg b/Dive-into-DL-paddlepaddle/docs/img/neon128.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/neural-style.jpg b/Dive-into-DL-paddlepaddle/docs/img/neural-style.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/neural-style.svg b/Dive-into-DL-paddlepaddle/docs/img/neural-style.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/neuron.svg b/Dive-into-DL-paddlepaddle/docs/img/neuron.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nin.svg b/Dive-into-DL-paddlepaddle/docs/img/nin.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nli-attention.svg b/Dive-into-DL-paddlepaddle/docs/img/nli-attention.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nlp-map-app.svg b/Dive-into-DL-paddlepaddle/docs/img/nlp-map-app.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nlp-map-nli-attention.svg b/Dive-into-DL-paddlepaddle/docs/img/nlp-map-nli-attention.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nlp-map-nli-bert.svg b/Dive-into-DL-paddlepaddle/docs/img/nlp-map-nli-bert.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nlp-map-pretrain.svg b/Dive-into-DL-paddlepaddle/docs/img/nlp-map-pretrain.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nlp-map-sa-cnn.svg b/Dive-into-DL-paddlepaddle/docs/img/nlp-map-sa-cnn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nlp-map-sa-rnn.svg b/Dive-into-DL-paddlepaddle/docs/img/nlp-map-sa-rnn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nonconvex.svg b/Dive-into-DL-paddlepaddle/docs/img/nonconvex.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nvlink-twoloop.svg b/Dive-into-DL-paddlepaddle/docs/img/nvlink-twoloop.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/nvlink.svg b/Dive-into-DL-paddlepaddle/docs/img/nvlink.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/p2x.png b/Dive-into-DL-paddlepaddle/docs/img/p2x.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/pacman.svg b/Dive-into-DL-paddlepaddle/docs/img/pacman.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/par-vec.svg b/Dive-into-DL-paddlepaddle/docs/img/par-vec.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/pikachu.jpg b/Dive-into-DL-paddlepaddle/docs/img/pikachu.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/polygon-circle.svg b/Dive-into-DL-paddlepaddle/docs/img/polygon-circle.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/pooling.svg b/Dive-into-DL-paddlepaddle/docs/img/pooling.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/popvssoda.png b/Dive-into-DL-paddlepaddle/docs/img/popvssoda.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/posSecDer.svg b/Dive-into-DL-paddlepaddle/docs/img/posSecDer.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/proj-vec.svg b/Dive-into-DL-paddlepaddle/docs/img/proj-vec.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/projections.svg b/Dive-into-DL-paddlepaddle/docs/img/projections.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/ps-distributed.svg b/Dive-into-DL-paddlepaddle/docs/img/ps-distributed.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/ps-multimachine.svg b/Dive-into-DL-paddlepaddle/docs/img/ps-multimachine.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/ps-multips.svg b/Dive-into-DL-paddlepaddle/docs/img/ps-multips.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/ps.svg b/Dive-into-DL-paddlepaddle/docs/img/ps.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/qkv.svg b/Dive-into-DL-paddlepaddle/docs/img/qkv.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/r-cnn.svg b/Dive-into-DL-paddlepaddle/docs/img/r-cnn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rainier.jpg b/Dive-into-DL-paddlepaddle/docs/img/rainier.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rec-caser.svg b/Dive-into-DL-paddlepaddle/docs/img/rec-caser.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rec-deepfm.svg b/Dive-into-DL-paddlepaddle/docs/img/rec-deepfm.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rec-intro.svg b/Dive-into-DL-paddlepaddle/docs/img/rec-intro.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rec-mf.svg b/Dive-into-DL-paddlepaddle/docs/img/rec-mf.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rec-neumf.svg b/Dive-into-DL-paddlepaddle/docs/img/rec-neumf.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rec-ranking.svg b/Dive-into-DL-paddlepaddle/docs/img/rec-ranking.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rec-seq-data.svg b/Dive-into-DL-paddlepaddle/docs/img/rec-seq-data.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rect-trans.svg b/Dive-into-DL-paddlepaddle/docs/img/rect-trans.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/residual-block.svg b/Dive-into-DL-paddlepaddle/docs/img/residual-block.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/resnet-block.svg b/Dive-into-DL-paddlepaddle/docs/img/resnet-block.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/resnet18.svg b/Dive-into-DL-paddlepaddle/docs/img/resnet18.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/ringsync.svg b/Dive-into-DL-paddlepaddle/docs/img/ringsync.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rl-environment.svg b/Dive-into-DL-paddlepaddle/docs/img/rl-environment.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rnn-bptt.svg b/Dive-into-DL-paddlepaddle/docs/img/rnn-bptt.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rnn-train.svg b/Dive-into-DL-paddlepaddle/docs/img/rnn-train.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/rnn.svg b/Dive-into-DL-paddlepaddle/docs/img/rnn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/roi.svg b/Dive-into-DL-paddlepaddle/docs/img/roi.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/s2s-prob1.svg b/Dive-into-DL-paddlepaddle/docs/img/s2s-prob1.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/s2s-prob2.svg b/Dive-into-DL-paddlepaddle/docs/img/s2s-prob2.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sagemaker-create-2.png b/Dive-into-DL-paddlepaddle/docs/img/sagemaker-create-2.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sagemaker-create-3.png b/Dive-into-DL-paddlepaddle/docs/img/sagemaker-create-3.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sagemaker-create.png b/Dive-into-DL-paddlepaddle/docs/img/sagemaker-create.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sagemaker-jupyter.png b/Dive-into-DL-paddlepaddle/docs/img/sagemaker-jupyter.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sagemaker-open.png b/Dive-into-DL-paddlepaddle/docs/img/sagemaker-open.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sagemaker-stop.png b/Dive-into-DL-paddlepaddle/docs/img/sagemaker-stop.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sagemaker-terminal.png b/Dive-into-DL-paddlepaddle/docs/img/sagemaker-terminal.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sagemaker.png b/Dive-into-DL-paddlepaddle/docs/img/sagemaker.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/segmentation.svg b/Dive-into-DL-paddlepaddle/docs/img/segmentation.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/self-attention.svg b/Dive-into-DL-paddlepaddle/docs/img/self-attention.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/seq2seq-attention-details.svg b/Dive-into-DL-paddlepaddle/docs/img/seq2seq-attention-details.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/seq2seq-attention.svg b/Dive-into-DL-paddlepaddle/docs/img/seq2seq-attention.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/seq2seq-details.svg b/Dive-into-DL-paddlepaddle/docs/img/seq2seq-details.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/seq2seq-predict.svg b/Dive-into-DL-paddlepaddle/docs/img/seq2seq-predict.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/seq2seq.svg b/Dive-into-DL-paddlepaddle/docs/img/seq2seq.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sequence-model.svg b/Dive-into-DL-paddlepaddle/docs/img/sequence-model.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/singleneuron.svg b/Dive-into-DL-paddlepaddle/docs/img/singleneuron.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/skip-gram.svg b/Dive-into-DL-paddlepaddle/docs/img/skip-gram.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/skylake.svg b/Dive-into-DL-paddlepaddle/docs/img/skylake.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/softmaxreg.svg b/Dive-into-DL-paddlepaddle/docs/img/softmaxreg.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/space-division-3d.svg b/Dive-into-DL-paddlepaddle/docs/img/space-division-3d.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/space-division.svg b/Dive-into-DL-paddlepaddle/docs/img/space-division.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/speech.png b/Dive-into-DL-paddlepaddle/docs/img/speech.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/splitting.svg b/Dive-into-DL-paddlepaddle/docs/img/splitting.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/ssd.svg b/Dive-into-DL-paddlepaddle/docs/img/ssd.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/stackedanimals.png b/Dive-into-DL-paddlepaddle/docs/img/stackedanimals.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/statistical-significance.svg b/Dive-into-DL-paddlepaddle/docs/img/statistical-significance.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/style-transfer.svg b/Dive-into-DL-paddlepaddle/docs/img/style-transfer.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sub-area.svg b/Dive-into-DL-paddlepaddle/docs/img/sub-area.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/sum-order.svg b/Dive-into-DL-paddlepaddle/docs/img/sum-order.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/supervised-learning.svg b/Dive-into-DL-paddlepaddle/docs/img/supervised-learning.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/tensorcore.jpg b/Dive-into-DL-paddlepaddle/docs/img/tensorcore.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/textcnn.svg b/Dive-into-DL-paddlepaddle/docs/img/textcnn.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/threading.svg b/Dive-into-DL-paddlepaddle/docs/img/threading.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/timemachine-5gram.svg b/Dive-into-DL-paddlepaddle/docs/img/timemachine-5gram.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/trans_conv.svg b/Dive-into-DL-paddlepaddle/docs/img/trans_conv.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/trans_conv_stride2.svg b/Dive-into-DL-paddlepaddle/docs/img/trans_conv_stride2.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/transformer.svg b/Dive-into-DL-paddlepaddle/docs/img/transformer.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/truncated-bptt.svg b/Dive-into-DL-paddlepaddle/docs/img/truncated-bptt.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/turing-processing-block.png b/Dive-into-DL-paddlepaddle/docs/img/turing-processing-block.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/turing.png b/Dive-into-DL-paddlepaddle/docs/img/turing.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/twogpu.svg b/Dive-into-DL-paddlepaddle/docs/img/twogpu.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/ubuntu-new.png b/Dive-into-DL-paddlepaddle/docs/img/ubuntu-new.png
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/vec-add.svg b/Dive-into-DL-paddlepaddle/docs/img/vec-add.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/vec-angle.svg b/Dive-into-DL-paddlepaddle/docs/img/vec-angle.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/vgg.svg b/Dive-into-DL-paddlepaddle/docs/img/vgg.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/wake-word.svg b/Dive-into-DL-paddlepaddle/docs/img/wake-word.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/waldo-mask.jpg b/Dive-into-DL-paddlepaddle/docs/img/waldo-mask.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/wattvsprice.svg b/Dive-into-DL-paddlepaddle/docs/img/wattvsprice.svg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/where-wally-walker-books.jpg b/Dive-into-DL-paddlepaddle/docs/img/where-wally-walker-books.jpg
old mode 100644
new mode 100755
diff --git a/Dive-into-DL-paddlepaddle/docs/img/zeroSecDer.svg b/Dive-into-DL-paddlepaddle/docs/img/zeroSecDer.svg
old mode 100644
new mode 100755
diff --git a/LICENSE b/LICENSE
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/README.md b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/README.md
old mode 100644
new mode 100755
index fcc16d814..e7da53591
--- a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/README.md
@@ -151,7 +151,7 @@ def generate_proposal_file(detection_result_dir,proposal_file_path):
        width = sp[1]  # width (columns) of image
file_name=txt_file.split("/")[-1].split("_")[-1].replace(".txt","")
-
+
key = sub_dir+","+file_name
#print(file_name,key)
person_list = []
@@ -177,13 +177,13 @@ def generate_proposal_file(detection_result_dir,proposal_file_path):
person_proposal = [x1,y1,x2,y2,score]
person_list.append(person_proposal)
-
+
proposals[key] = person_list
-
+
#for key,value in proposals.items():
# if '00001' in key:
# print(key,value)
-
+
with open(proposal_file_path, 'wb') as handle:
pickle.dump(proposals, handle, protocol=pickle.HIGHEST_PROTOCOL)
@@ -334,23 +334,23 @@ Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端
(1) 引用 paddle inference 预测库
import paddle.inference as paddle_infer
(2) 创建配置对象,并根据需求配置
- # 创建 config,并设置预测模型路径
+ # 创建 config,并设置预测模型路径
config = paddle_infer.Config(args.model_file, args.params_file)
(3) 根据Config创建预测对象
predictor = paddle_infer.create_predictor(config)
(4) 设置模型输入 Tensor
- # 获取输入的名称
- input_names = predictor.get_input_names()
- input_handle = predictor.get_input_handle(input_names[0])
- # 设置输入
- fake_input = np.random.randn(args.batch_size, 3, 318, 318).astype("float32")
- input_handle.reshape([args.batch_size, 3, 318, 318])
+ # 获取输入的名称
+ input_names = predictor.get_input_names()
+ input_handle = predictor.get_input_handle(input_names[0])
+ # 设置输入
+ fake_input = np.random.randn(args.batch_size, 3, 318, 318).astype("float32")
+ input_handle.reshape([args.batch_size, 3, 318, 318])
input_handle.copy_from_cpu(fake_input)
(5) 执行预测
predictor.run()
(6) 获得预测结果
- output_names = predictor.get_output_names()
- output_handle = predictor.get_output_handle(output_names[0])
+ output_names = predictor.get_output_names()
+ output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu() # numpy.ndarray类型
```
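
The six numbered steps above can be stitched into a single runnable script. The sketch below uses only the `paddle.inference` calls already shown; the model/params file names are placeholders for your exported model, and the fake input mirrors the `[batch_size, 3, 318, 318]` shape used above.

```python
import numpy as np
import paddle.inference as paddle_infer

# (2) create the config and point it at the exported model (placeholder paths)
config = paddle_infer.Config("model.pdmodel", "model.pdiparams")

# (3) create the predictor from the config
predictor = paddle_infer.create_predictor(config)

# (4) set the input tensor with a fake batch of size 1
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
fake_input = np.random.randn(1, 3, 318, 318).astype("float32")
input_handle.reshape([1, 3, 318, 318])
input_handle.copy_from_cpu(fake_input)

# (5) run inference
predictor.run()

# (6) fetch the prediction result as a numpy.ndarray
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu()
print(output_data.shape)
```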
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/abnoraml_action.yaml b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/abnoraml_action.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/extract_video_frames.sh b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/extract_video_frames.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/SlowFast.png b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/SlowFast.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/action_related_tasks.png b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/action_related_tasks.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/alpha_1.png b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/alpha_1.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/alpha_2.png b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/alpha_2.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/background.png b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/background.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/difficulties.png b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/difficulties.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/inference.png b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/inference.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/solution.png b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/images/solution.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/infer_batch.py b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/infer_batch.py
old mode 100644
new mode 100755
index 7cbbf86d9..46dea60bd
--- a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/infer_batch.py
+++ b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/infer_batch.py
@@ -86,6 +86,7 @@ def parse_args():
args = parser.parse_args()
return args
+
# 两级目录
def get_test_images(infer_dir):
"""
@@ -102,16 +103,15 @@ def get_test_images(infer_dir):
sub_dir_list = os.listdir(infer_dir)
- print("*********sub_dir_list:",sub_dir_list)
+ print("*********sub_dir_list:", sub_dir_list)
-
# 返回结果
images = set()
for sub_dir in sub_dir_list:
- video_dir = os.path.join(infer_dir,sub_dir)
+ video_dir = os.path.join(infer_dir, sub_dir)
for ext in exts:
images.update(glob.glob('{}/*.{}'.format(video_dir, ext)))
-
+
images = list(images)
assert len(images) > 0, "no image found in {}".format(infer_dir)
@@ -130,8 +130,8 @@ def run(FLAGS, cfg):
# get inference images
images = get_test_images(FLAGS.infer_dir)
- print("the number of images:",len(images)) #the number of images: 31631
-
+ print("the number of images:", len(images)) #the number of images: 31631
+
# inference
trainer.predict_batch(
images,
@@ -158,7 +158,8 @@ def main():
else:
place = paddle.set_device('cpu')
- if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
+ if 'norm_type' in cfg and cfg[
+ 'norm_type'] == 'sync_bn' and not cfg.use_gpu:
cfg['norm_type'] = 'bn'
if FLAGS.slim_config:
diff --git a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/trainer.py b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/trainer.py
old mode 100644
new mode 100755
index a5d853e92..da63b82d6
--- a/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/trainer.py
+++ b/Paddle_Industry_Practice_Sample_Library/Abnormal_Action_Detection/trainer.py
@@ -413,10 +413,10 @@ def evaluate(self):
self._eval_with_loader(self.loader)
def predict_batch(self,
- images,
- draw_threshold=0.5,
- output_dir='output',
- save_txt=False):
+ images,
+ draw_threshold=0.5,
+ output_dir='output',
+ save_txt=False):
self.dataset.set_images(images)
print("** finish set images **")
loader = create('TestReader')(self.dataset, 0)
@@ -446,7 +446,7 @@ def predict_batch(self,
if hasattr(value, 'numpy'):
outs[key] = value.numpy()
results.append(outs)
-
+
for outs in results:
batch_res = get_infer_results(outs, clsid2catid)
bbox_num = outs['bbox_num']
@@ -474,17 +474,17 @@ def predict_batch(self,
if self._compose_callback:
self._compose_callback.on_step_end(self.status)
# save image with detection
-
+
path_list = image_path.split("/")
image_name = path_list[-1]
video_name = path_list[-2]
-
+
# 检测结果存放目录
- save_dir = os.path.join(output_dir,video_name)
+ save_dir = os.path.join(output_dir, video_name)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
- save_name = os.path.join(save_dir,image_name)
+ save_name = os.path.join(save_dir, image_name)
#print("save_name:",save_name)
logger.info("Detection bbox results save in {}".format(
save_name))
@@ -494,7 +494,6 @@ def predict_batch(self,
save_result(save_path, bbox_res, catid2name, draw_threshold)
start = end
-
def predict(self,
images,
draw_threshold=0.5,
@@ -526,7 +525,7 @@ def predict(self,
if hasattr(value, 'numpy'):
outs[key] = value.numpy()
results.append(outs)
-
+
for outs in results:
batch_res = get_infer_results(outs, clsid2catid)
bbox_num = outs['bbox_num']
@@ -618,7 +617,7 @@ def _get_infer_cfg_and_input_spec(self, save_dir, prune_input=True):
"crops": InputSpec(
shape=[None, 3, 192, 64], name='crops')
})
-
+
static_model = None
pruned_input_spec = input_spec
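
Putting `infer_batch.py` and `trainer.py` together: `get_test_images` walks the two-level `<infer_dir>/<video>/` layout, and `Trainer.predict_batch` runs detection on the collected frames, grouping the saved results by video name under `output_dir`. A condensed sketch of that wiring (trainer construction omitted; only calls shown in the diffs above are used, with `predict_batch`'s own defaults spelled out):

```python
# sketch: how infer_batch.py drives Trainer.predict_batch
images = get_test_images(FLAGS.infer_dir)  # scans <infer_dir>/<video>/*.jpg etc.
trainer.predict_batch(
    images,
    draw_threshold=0.5,    # detections below this score are not drawn
    output_dir='output',   # results are grouped per video name
    save_txt=False)
```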
diff --git a/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/coco_detection.yml b/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/coco_detection.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/data_2_coco.py b/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/data_2_coco.py
old mode 100644
new mode 100755
index b83ebf550..fc19e7c5a
--- a/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/data_2_coco.py
+++ b/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/data_2_coco.py
@@ -1,4 +1,4 @@
-import os,sys
+import os, sys
import json
import cv2
import glob
@@ -9,40 +9,41 @@
DATA_DIR = "/home/aistudio/work/dataset/coco"
-class Transfer2COCO:
- def __init__(self,is_mode='train'):
+class Transfer2COCO:
+ def __init__(self, is_mode='train'):
self.images = []
self.annotations = []
self.categories = []
self.img_id = 0
self.ann_id = 0
self.is_mode = is_mode
- if not os.path.exists(DATA_DIR+"/{}".format(self.is_mode)):
- os.makedirs(DATA_DIR+"/{}".format(self.is_mode))
+ if not os.path.exists(DATA_DIR + "/{}".format(self.is_mode)):
+ os.makedirs(DATA_DIR + "/{}".format(self.is_mode))
- def to_coco(self, anno_file,img_dir,label_list_path):
+ def to_coco(self, anno_file, img_dir, label_list_path):
self._init_categories(label_list_path)
- with open(anno_file,'r') as f:
- anno_result= f.readlines()
-
+ with open(anno_file, 'r') as f:
+ anno_result = f.readlines()
+
for item in anno_result:
items = item.strip().split('\t')
-
+
image_file = items[0]
- image_file_path = os.path.join(img_dir,image_file)
+ image_file_path = os.path.join(img_dir, image_file)
- bboxs=[]
- detect_labels=[]
+ bboxs = []
+ detect_labels = []
for anno in items[1:]:
- if len(anno.strip())<1:
+ if len(anno.strip()) < 1:
continue
object = json.loads(anno.strip())
detect_name = object['value']
detect_label = self.name_dict[detect_name]
- coord = object['coordinate']#[[435.478,126.261],[697.043,382.261]]
+ coord = object[
+ 'coordinate'] #[[435.478,126.261],[697.043,382.261]]
box = []
box.append(coord[0][0])
box.append(coord[0][1])
@@ -55,9 +56,9 @@ def to_coco(self, anno_file,img_dir,label_list_path):
#这种读取方法更快
img = Image.open(image_file_path)
w, h = img.size
- self.images.append(self._image(image_file_path,h, w))
+ self.images.append(self._image(image_file_path, h, w))
- self._cp_img(image_file_path)#复制文件路径
+ self._cp_img(image_file_path) #复制文件路径
             if self.img_id % 200 == 0:
print("处理到第{}张图片".format(self.img_id))
for bbox, label in zip(bboxs, detect_labels):
@@ -74,35 +75,37 @@ def to_coco(self, anno_file,img_dir,label_list_path):
instance['categories'] = self.categories
return instance
- def _init_categories(self,label_list_path):
- with open(label_list_path,'r') as f:
+ def _init_categories(self, label_list_path):
+ with open(label_list_path, 'r') as f:
lines = f.readlines()
- self.name_dict={}
+ self.name_dict = {}
for line in lines:
- items=line.strip().split(' ')
+ items = line.strip().split(' ')
category = {}
category['id'] = items[0]
category['name'] = items[1]
- self.name_dict[items[1]]=items[0]
+ self.name_dict[items[1]] = items[0]
category['supercategory'] = 'defect_name'
self.categories.append(category)
- def _image(self, path,h,w):
+ def _image(self, path, h, w):
image = {}
image['height'] = h
image['width'] = w
image['id'] = self.img_id
- image['file_name'] = os.path.basename(path)#返回path最后的文件名
+ image['file_name'] = os.path.basename(path) #返回path最后的文件名
return image
- def _annotation(self,label,bbox):
- area=(bbox[2]-bbox[0])*(bbox[3]-bbox[1])
- points=[[bbox[0],bbox[1]],[bbox[2],bbox[1]],[bbox[2],bbox[3]],[bbox[0],bbox[3]]]
+ def _annotation(self, label, bbox):
+ area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
+ points = [[bbox[0], bbox[1]], [bbox[2], bbox[1]], [bbox[2], bbox[3]],
+ [bbox[0], bbox[3]]]
annotation = {}
annotation['id'] = self.ann_id
annotation['image_id'] = self.img_id
annotation['category_id'] = label
- annotation['segmentation'] = []# np.asarray(points).flatten().tolist()
+ annotation['segmentation'] = [
+ ] # np.asarray(points).flatten().tolist()
annotation['bbox'] = self._get_box(points)
annotation['iscrowd'] = 0
annotation["ignore"] = 0
@@ -110,8 +113,10 @@ def _annotation(self,label,bbox):
return annotation
def _cp_img(self, img_path):
- shutil.copy(img_path, os.path.join(DATA_DIR+"/{}".format(self.is_mode), os.path.basename(img_path)))
-
+ shutil.copy(img_path,
+ os.path.join(DATA_DIR + "/{}".format(self.is_mode),
+ os.path.basename(img_path)))
+
def _get_box(self, points):
min_x = min_y = np.inf
max_x = max_y = 0
@@ -126,26 +131,33 @@ def _get_box(self, points):
def save_coco_json(self, instance, save_path):
import json
with open(save_path, 'w') as fp:
- json.dump(instance, fp, indent=1, separators=(',', ': '))#缩进设置为1,元素之间用逗号隔开 , key和内容之间 用冒号隔开
+ json.dump(
+ instance, fp, indent=1,
+ separators=(',', ': ')) #缩进设置为1,元素之间用逗号隔开 , key和内容之间 用冒号隔开
+
transfer = Transfer2COCO()
#训练集
img_dir = "/home/aistudio/data/data6045"
-anno_dir="/home/aistudio/data/data6045/train.txt"
+anno_dir = "/home/aistudio/data/data6045/train.txt"
label_list_file = '/home/aistudio/data/data6045/label_list.txt'
-train_instance = transfer.to_coco(anno_dir,img_dir,label_list_file)
-if not os.path.exists(DATA_DIR+"/annotations/"):
- os.makedirs(DATA_DIR+"/annotations/")
-transfer.save_coco_json(train_instance, DATA_DIR+"/annotations/"+'instances_{}.json'.format("train"))
+train_instance = transfer.to_coco(anno_dir, img_dir, label_list_file)
+if not os.path.exists(DATA_DIR + "/annotations/"):
+ os.makedirs(DATA_DIR + "/annotations/")
+transfer.save_coco_json(
+ train_instance,
+ DATA_DIR + "/annotations/" + 'instances_{}.json'.format("train"))
transfer = Transfer2COCO(is_mode='eval')
#验证集
img_dir = "/home/aistudio/data/data6045"
-anno_dir="/home/aistudio/data/data6045/eval.txt"
+anno_dir = "/home/aistudio/data/data6045/eval.txt"
label_list_file = '/home/aistudio/data/data6045/label_list.txt'
-train_instance = transfer.to_coco(anno_dir,img_dir,label_list_file)
-if not os.path.exists(DATA_DIR+"/annotations/"):
- os.makedirs(DATA_DIR+"/annotations/")
-transfer.save_coco_json(train_instance, DATA_DIR+"/annotations/"+'instances_{}.json'.format("eval"))
+train_instance = transfer.to_coco(anno_dir, img_dir, label_list_file)
+if not os.path.exists(DATA_DIR + "/annotations/"):
+ os.makedirs(DATA_DIR + "/annotations/")
+transfer.save_coco_json(
+ train_instance,
+ DATA_DIR + "/annotations/" + 'instances_{}.json'.format("eval"))
diff --git a/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/images/data_demo.jpeg b/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/images/data_demo.jpeg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/optimizer_2x.yml b/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/optimizer_2x.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/ppyolo_r50vd_dcn.yml b/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/ppyolo_r50vd_dcn.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/ppyolo_r50vd_dcn_2x_coco.yml b/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/ppyolo_r50vd_dcn_2x_coco.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/ppyolo_reader.yml b/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/ppyolo_reader.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/runtime.yml b/Paddle_Industry_Practice_Sample_Library/Bolt_and_Nut_Detection/runtime.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/README.md b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/README.md
new file mode 100644
index 000000000..895b81b57
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/README.md
@@ -0,0 +1,171 @@
+# Electromobile Safety Detection in Elevators
+
+## Contents
+
+* [Project Overview](#1-project-overview)
+* [Installation](#2-installation)
+* [Data Preparation](#3-data-preparation)
+* [Model Selection](#4-model-selection)
+* [Model Training](#5-model-training)
+* [Model Export](#6-model-export)
+* [Recognition Model and Index Building](#7-recognition-model-and-index-building)
+* [Prediction Deployment](#8-prediction-deployment)
+
+## 1 Project Overview
+
+Electric bicycles (electromobiles) brought into elevators and residential buildings are a serious fire-safety hazard, and many communities therefore prohibit them from entering elevators. This project detects electromobiles in elevator camera footage so that alarms can be raised at the source. Because motorcycles and ordinary bicycles look very similar to electromobiles, the solution combines the PicoDet detection model from the PaddleDetection suite with the general image recognition model from PaddleClas to identify them accurately.
+
+Note: a runnable AI Studio version (with GPU resources) is available at [Electromobile Safety Detection in Elevators](https://aistudio.baidu.com/aistudio/projectdetail/3497217?channelType=0&channel=0).
+
+## 2 Installation
+
+##### Requirements
+
+* PaddlePaddle = 2.2.2
+* Python >= 3.5
+
+## 3 Data Preparation
+
+The PicoDet dataset is in VOC format (annotated with labelimg) and contains 21903 elevator images in total: 17522 for training and 4381 for validation. Collected from everyday elevator scenes, the data contains 14715 motorcycle/electromobile targets, 23058 person targets and 3750 bicycle targets. Since PicoDet consumes COCO-format data, the VOC dataset must be converted to COCO format. The VOC annotations were produced image by image with the Python tool labelimg, which writes one XML file per image; each target is an `object` element whose `name` field holds the class label and whose `bndbox` element holds the bounding rectangle's top-left and bottom-right coordinates.
+
+The VOC dataset pairs every image with its annotation XML, and the training and validation splits are generated from these pairs. The directory layout is:
+
+```
+classify_voc.py
+picodet_motorcycle
+    Annotations
+        1595214506200933-1604535322-[]-motorcycle.xml
+        1595214506200933-1604542813-[]-motorcycle.xml
+        1595214506200933-1604559538-[]-motorcycle.xml
+        ...
+    ImageSets
+        Main
+            test.txt
+            train.txt
+            trainval.txt
+            val.txt
+    JPEGImages
+        1595214506200933-1604535322-[]-motorcycle.jpg
+        1595214506200933-1604542813-[]-motorcycle.jpg
+        1595214506200933-1604559538-[]-motorcycle.jpg
+        ...
+picodet_motorcycle.zip
+prepare_voc_data.py
+test.txt
+trainval.txt
+```
+
+VOC dataset [download link](https://aistudio.baidu.com/aistudio/datasetdetail/128282)
+Recognition dataset [download link](https://aistudio.baidu.com/aistudio/datasetdetail/128448)
+Convert the VOC-format dataset to COCO format with the x2coco.py conversion script shipped with Paddle.
+Note: replace the paths below with your own when running.
+```
+python x2coco.py --dataset_type voc --voc_anno_dir /home/aistudio/data/data128282/ --voc_anno_list /home/aistudio/data/data128282/trainval.txt --voc_label_list /home/aistudio/data/data128282/label_list.txt --voc_out_name voc_train.json
+python x2coco.py --dataset_type voc --voc_anno_dir /home/aistudio/data/data128282/ --voc_anno_list /home/aistudio/data/data128282/test.txt --voc_label_list /home/aistudio/data/data128282/label_list.txt --voc_out_name voc_test.json
+mv voc_test.json /home/aistudio/data/data128282/
+mv voc_train.json /home/aistudio/data/data128282/
+```
+
+## 4 Model Selection
+
+We choose PP-PicoDet, the new series of ultra-lightweight detection models in PaddleDetection.
+
+Highlights of the PP-PicoDet models:
+
+ - Higher mAP: the first to exceed 30+ mAP(0.5:0.95) within 1M parameters (with 416 input size)
+ - Faster prediction speed: up to 150FPS on ARM CPU
+ - Deployment friendly: supports the PaddleLite/MNN/NCNN/OpenVINO prediction libraries, supports export to ONNX, and provides C++/Python/Android demos
+ - Advanced algorithms: improves on existing SOTA algorithms, e.g. ESNet, CSP-PAN, SimOTA
+
+## 5 Model Training
+
+First install the dependencies:
+```
+cd code/train/
+pip install pycocotools
+pip install faiss-gpu
+pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
+```
+
+To export serving models later, also install the Paddle Serving packages:
+```
+pip install paddle-serving-app==0.6.2 -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install paddle-serving-client==0.6.2 -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install paddle-serving-server-gpu==0.6.3.post102 -i https://pypi.tuna.tsinghua.edu.cn/simple
+```
+
+## 6 Model Export
+
+Export the trained model as a serving model:
+```
+cd code/train/
+python export_model.py --export_serving_model=true -c picodet_lcnet_1_5x_416_coco.yml --output_dir=./output_inference/
+```
+
+Move the exported serving model into the deployment directory:
+```
+cd code/train/output_inference/picodet_lcnet_1_5x_416_coco/
+mv serving_server/ code/picodet_lcnet_1_5x_416_coco/
+```
+
+Start the detection web service:
+```
+cd /home/aistudio/work/code/picodet_lcnet_1_5x_416_coco/
+python3 web_service.py
+```
+
+## 7 Recognition Model and Index Building
+
+The detection model alone cannot reliably tell electromobiles apart from the similar-looking motorcycles and bicycles, so a recognition stage is added after detection to raise accuracy. It uses general_PPLCNet_x2_5_lite_v1.0_infer, the general recognition model from the PaddleClas image recognition suite.
+
+First download the model from the Paddle model zoo, unpack it, and convert it into a serving model:
+```
+cd code/
+wget -P models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
+cd models
+tar -xf general_PPLCNet_x2_5_lite_v1.0_infer.tar
+python3 -m paddle_serving_client.convert --dirname ./general_PPLCNet_x2_5_lite_v1.0_infer/ --model_filename inference.pdmodel --params_filename inference.pdiparams --serving_server ./general_PPLCNet_x2_5_lite_v1.0_serving/ --serving_client ./general_PPLCNet_x2_5_lite_v1.0_client/
+cp -r ./general_PPLCNet_x2_5_lite_v1.0_serving ../general_PPLCNet_x2_5_lite_v1.0/
+```
+
+Unpack the recognition dataset, generate the label file with make_label.py, and build the retrieval index:
+```
+cd code
+python make_label.py
+python python/build_gallery.py -c build_gallery/build_general.yaml -o IndexProcess.data_file="./index_label.txt" -o IndexProcess.index_dir="index_result"
+mv index_result/ general_PPLCNet_x2_5_lite_v1.0/
+```
+
+## 8 Prediction Deployment
+```
+cd /home/aistudio/work/code/general_PPLCNet_x2_5_lite_v1.0/
+python recognition_web_service_onlyrec.py
+```
+
+An example of the actual detection output is shown below.
+
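
For completeness, a minimal client sketch for exercising the recognition web service. The host, port and endpoint name below are assumptions (check `recognition_web_service_onlyrec.py` for the actual values); the image is sent base64-encoded in the `{"key": [...], "value": [...]}` layout that Paddle Serving web services conventionally accept.

```python
import base64
import requests

# hypothetical endpoint; adjust host/port/service name to your web service config
url = "http://127.0.0.1:9292/recognition/prediction"

with open("test.jpg", "rb") as f:  # placeholder image path
    image = base64.b64encode(f.read()).decode("utf8")

resp = requests.post(url, json={"key": ["image"], "value": [image]})
print(resp.json())
```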
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/build_gallery.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/build_gallery.cpython-37.pyc
new file mode 100644
index 000000000..0127dba01
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/build_gallery.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/det_preprocess.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/det_preprocess.cpython-37.pyc
new file mode 100644
index 000000000..5d7e3c23a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/det_preprocess.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/postprocess.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/postprocess.cpython-37.pyc
new file mode 100644
index 000000000..48953d4a7
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/postprocess.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/predict_rec.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/predict_rec.cpython-37.pyc
new file mode 100644
index 000000000..661ed04d7
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/predict_rec.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/preprocess.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/preprocess.cpython-37.pyc
new file mode 100644
index 000000000..4369f8ab4
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/__pycache__/preprocess.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/build_gallery.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/build_gallery.py
new file mode 100644
index 000000000..5a7d82fb5
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/build_gallery.py
@@ -0,0 +1,213 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import cv2
+import faiss
+import numpy as np
+from tqdm import tqdm
+import pickle
+from predict_rec import RecPredictor
+
+from utils import logger
+from utils import config
+
+
+def split_datafile(data_file, image_root, delimiter="\t"):
+ '''
+    data_file: image path and label info, split by a delimiter
+ image_root: image path root
+ delimiter: delimiter
+ '''
+ gallery_images = []
+ gallery_docs = []
+ with open(data_file, 'r', encoding='utf-8') as f:
+ lines = f.readlines()
+ for _, ori_line in enumerate(lines):
+ line = ori_line.strip().split(delimiter)
+ text_num = len(line)
+            assert text_num >= 2, f"line({ori_line}) must be split into at least 2 parts, but got {text_num}"
+ image_file = os.path.join(image_root, line[0])
+
+ gallery_images.append(image_file)
+ gallery_docs.append(ori_line.strip())
+
+ return gallery_images, gallery_docs
+
+
+class GalleryBuilder(object):
+ def __init__(self, config):
+
+ self.config = config
+ self.rec_predictor = RecPredictor(config)
+ assert 'IndexProcess' in config.keys(), "Index config not found ... "
+ self.build(config['IndexProcess'])
+
+ def build(self, config):
+ '''
+ build index from scratch
+ '''
+ operation_method = config.get("index_operation", "new").lower()
+
+ gallery_images, gallery_docs = split_datafile(
+ config['data_file'], config['image_root'], config['delimiter'])
+
+        # when removing data from the index, there is no need to extract features
+ if operation_method != "remove":
+ gallery_features = self._extract_features(gallery_images, config)
+ assert operation_method in [
+ "new", "remove", "append"
+ ], "Only append, remove and new operation are supported"
+
+ # vector.index: faiss index file
+ # id_map.pkl: use this file to map id to image_doc
+ if operation_method in ["remove", "append"]:
+ # if remove or append, vector.index and id_map.pkl must exist
+            assert os.path.exists(
+                os.path.join(config["index_dir"], "vector.index")
+            ), "vector.index does not exist in {} when 'index_operation' is not None".format(
+                config["index_dir"])
+            assert os.path.exists(
+                os.path.join(config["index_dir"], "id_map.pkl")
+            ), "id_map.pkl does not exist in {} when 'index_operation' is not None".format(
+                config["index_dir"])
+ index = faiss.read_index(
+ os.path.join(config["index_dir"], "vector.index"))
+ with open(os.path.join(config["index_dir"], "id_map.pkl"),
+ 'rb') as fd:
+ ids = pickle.load(fd)
+            assert index.ntotal == len(ids.keys(
+            )), "number of vectors in the index does not match the number of entries in id_map"
+ else:
+ if not os.path.exists(config["index_dir"]):
+ os.makedirs(config["index_dir"], exist_ok=True)
+ index_method = config.get("index_method", "HNSW32")
+
+            # for the IVF method, calculate the IVF list size automatically
+ if index_method == "IVF":
+ index_method = index_method + str(
+ min(int(len(gallery_images) // 8), 65536)) + ",Flat"
+
+ # for binary index, add B at head of index_method
+ if config["dist_type"] == "hamming":
+ index_method = "B" + index_method
+
+ #dist_type
+ dist_type = faiss.METRIC_INNER_PRODUCT if config[
+ "dist_type"] == "IP" else faiss.METRIC_L2
+
+ #build index
+ if config["dist_type"] == "hamming":
+ index = faiss.index_binary_factory(config["embedding_size"],
+ index_method)
+ else:
+ index = faiss.index_factory(config["embedding_size"],
+ index_method, dist_type)
+ index = faiss.IndexIDMap2(index)
+ ids = {}
+
+ if config["index_method"] == "HNSW32":
+ logger.warning(
+ "The HNSW32 method dose not support 'remove' operation")
+
+ if operation_method != "remove":
+ # calculate id for new data
+ start_id = max(ids.keys()) + 1 if ids else 0
+ ids_now = (
+ np.arange(0, len(gallery_images)) + start_id).astype(np.int64)
+
+ # only train when new index file
+ if operation_method == "new":
+ if config["dist_type"] == "hamming":
+ index.add(gallery_features)
+ else:
+ index.train(gallery_features)
+
+ if not config["dist_type"] == "hamming":
+ index.add_with_ids(gallery_features, ids_now)
+
+ for i, d in zip(list(ids_now), gallery_docs):
+ ids[i] = d
+ else:
+ if config["index_method"] == "HNSW32":
+ raise RuntimeError(
+ "The index_method: HNSW32 dose not support 'remove' operation"
+ )
+ # remove ids in id_map, remove index data in faiss index
+ remove_ids = list(
+ filter(lambda k: ids.get(k) in gallery_docs, ids.keys()))
+ remove_ids = np.asarray(remove_ids)
+ index.remove_ids(remove_ids)
+ for k in remove_ids:
+ del ids[k]
+
+ # store faiss index file and id_map file
+ if config["dist_type"] == "hamming":
+ faiss.write_index_binary(
+ index, os.path.join(config["index_dir"], "vector.index"))
+ else:
+ faiss.write_index(
+ index, os.path.join(config["index_dir"], "vector.index"))
+
+ with open(os.path.join(config["index_dir"], "id_map.pkl"), 'wb') as fd:
+ pickle.dump(ids, fd)
+
+ def _extract_features(self, gallery_images, config):
+ # extract gallery features
+ if config["dist_type"] == "hamming":
+ gallery_features = np.zeros(
+ [len(gallery_images), config['embedding_size'] // 8],
+ dtype=np.uint8)
+ else:
+ gallery_features = np.zeros(
+ [len(gallery_images), config['embedding_size']],
+ dtype=np.float32)
+
+ #construct batch imgs and do inference
+ batch_size = config.get("batch_size", 32)
+ batch_img = []
+ for i, image_file in enumerate(tqdm(gallery_images)):
+ img = cv2.imread(image_file)
+ if img is None:
+ logger.error("img empty, please check {}".format(image_file))
+ exit()
+ img = img[:, :, ::-1]
+ batch_img.append(img)
+
+ if (i + 1) % batch_size == 0:
+ rec_feat = self.rec_predictor.predict(batch_img)
+ gallery_features[i - batch_size + 1:i + 1, :] = rec_feat
+ batch_img = []
+
+ if len(batch_img) > 0:
+ rec_feat = self.rec_predictor.predict(batch_img)
+ gallery_features[-len(batch_img):, :] = rec_feat
+ batch_img = []
+
+ return gallery_features
+
+
+def main(config):
+ GalleryBuilder(config)
+ return
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ main(config)
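
Once `build_gallery.py` has written `vector.index` and `id_map.pkl`, the gallery can be queried directly with faiss. A minimal sketch, assuming the non-hamming (`IP`) `dist_type` and the 512-d embeddings configured above; the random query vector stands in for a feature produced by the same `RecPredictor`:

```python
import os
import pickle

import faiss
import numpy as np

index_dir = "./images/index"  # matches IndexProcess.index_dir
index = faiss.read_index(os.path.join(index_dir, "vector.index"))
with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd:
    id_map = pickle.load(fd)

# search with one 512-d query feature (random here, for illustration only)
query = np.random.rand(1, 512).astype("float32")
scores, ids = index.search(query, 5)  # top-5 nearest gallery entries
for score, idx in zip(scores[0], ids[0]):
    print(score, id_map[int(idx)])  # id_map maps faiss ids back to gallery docs
```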
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/build_general.yaml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/build_general.yaml
new file mode 100644
index 000000000..5b83ea4d4
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/build_general.yaml
@@ -0,0 +1,36 @@
+Global:
+ rec_inference_model_dir: "./models/general_PPLCNet_x2_5_lite_v1.0_infer"
+ batch_size: 32
+ use_gpu: False
+ enable_mkldnn: True
+ cpu_num_threads: 10
+ enable_benchmark: True
+ use_fp16: False
+ ir_optim: True
+ use_tensorrt: False
+ gpu_mem: 8000
+ enable_profile: False
+
+RecPreProcess:
+ transform_ops:
+ - ResizeImage:
+ size: 224
+ - NormalizeImage:
+ scale: 0.00392157
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ order: ''
+ - ToCHWImage:
+
+RecPostProcess: null
+
+# indexing engine config
+IndexProcess:
+ index_method: "HNSW32" # supported: HNSW32, IVF, Flat
+ image_root: ""
+ index_dir: "./images/index"
+ data_file: "./images/motorcyclebike_label_all_02.txt"
+ index_operation: "new" # suported: "append", "remove", "new"
+ delimiter: "\t"
+ dist_type: "IP"
+ embedding_size: 512
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/det_preprocess.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/det_preprocess.py
new file mode 100644
index 000000000..65db32dc3
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/det_preprocess.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cv2
+import numpy as np
+
+
+def decode_image(im_file, im_info):
+ """read rgb image
+ Args:
+ im_file (str|np.ndarray): input can be image path or np.ndarray
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ if isinstance(im_file, str):
+ with open(im_file, 'rb') as f:
+ im_read = f.read()
+ data = np.frombuffer(im_read, dtype='uint8')
+ im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
+ im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+ else:
+ im = im_file
+ im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
+ im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
+ return im, im_info
+
+
+class DetResize(object):
+ """resize image by target_size and max_size
+ Args:
+ target_size (int): the target size of image
+ keep_ratio (bool): whether keep_ratio or not, default true
+ interp (int): method of resize
+ """
+
+ def __init__(
+ self,
+ target_size,
+ keep_ratio=True,
+ interp=cv2.INTER_LINEAR, ):
+ if isinstance(target_size, int):
+ target_size = [target_size, target_size]
+ self.target_size = target_size
+ self.keep_ratio = keep_ratio
+ self.interp = interp
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ assert len(self.target_size) == 2
+ assert self.target_size[0] > 0 and self.target_size[1] > 0
+ im_channel = im.shape[2]
+ im_scale_y, im_scale_x = self.generate_scale(im)
+ # set image_shape
+ im_info['input_shape'][1] = int(im_scale_y * im.shape[0])
+ im_info['input_shape'][2] = int(im_scale_x * im.shape[1])
+ im = cv2.resize(
+ im,
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=self.interp)
+ im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
+ im_info['scale_factor'] = np.array(
+ [im_scale_y, im_scale_x]).astype('float32')
+ return im, im_info
+
+ def generate_scale(self, im):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ Returns:
+ im_scale_x: the resize ratio of X
+ im_scale_y: the resize ratio of Y
+ """
+ origin_shape = im.shape[:2]
+ im_c = im.shape[2]
+ if self.keep_ratio:
+ im_size_min = np.min(origin_shape)
+ im_size_max = np.max(origin_shape)
+ target_size_min = np.min(self.target_size)
+ target_size_max = np.max(self.target_size)
+ im_scale = float(target_size_min) / float(im_size_min)
+ if np.round(im_scale * im_size_max) > target_size_max:
+ im_scale = float(target_size_max) / float(im_size_max)
+ im_scale_x = im_scale
+ im_scale_y = im_scale
+ else:
+ resize_h, resize_w = self.target_size
+ im_scale_y = resize_h / float(origin_shape[0])
+ im_scale_x = resize_w / float(origin_shape[1])
+ return im_scale_y, im_scale_x
+
+
+class DetNormalizeImage(object):
+ """normalize image
+ Args:
+ mean (list): im - mean
+ std (list): im / std
+ is_scale (bool): whether need im / 255
+ is_channel_first (bool): if True: image shape is CHW, else: HWC
+ """
+
+ def __init__(self, mean, std, is_scale=True):
+ self.mean = mean
+ self.std = std
+ self.is_scale = is_scale
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ im = im.astype(np.float32, copy=False)
+ mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
+ std = np.array(self.std)[np.newaxis, np.newaxis, :]
+ if self.is_scale:
+ im = im / 255.0
+ im -= mean
+ im /= std
+ return im, im_info
+
+
+class DetPermute(object):
+ """permute image
+ Args:
+ to_bgr (bool): whether convert RGB to BGR
+ channel_first (bool): whether convert HWC to CHW
+ """
+
+ def __init__(self, ):
+ super().__init__()
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+        im = im.transpose((2, 0, 1)).copy()
+ return im, im_info
+
+
+class DetPadStride(object):
+ """ padding image for model with FPN , instead PadBatch(pad_to_stride, pad_gt) in original config
+ Args:
+ stride (bool): model with FPN need image shape % stride == 0
+ """
+
+ def __init__(self, stride=0):
+ self.coarsest_stride = stride
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ coarsest_stride = self.coarsest_stride
+ if coarsest_stride <= 0:
+ return im, im_info
+ im_c, im_h, im_w = im.shape
+ pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
+ pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
+ padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
+ padding_im[:, :im_h, :im_w] = im
+ return padding_im, im_info
+
+
+def det_preprocess(im, im_info, preprocess_ops):
+ for operator in preprocess_ops:
+        im, im_info = operator(im, im_info)
+ return im, im_info
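
The operators above are meant to be chained through `det_preprocess`. A short usage sketch (`test.jpg` is a placeholder; the 416 target size and ImageNet mean/std are the values used elsewhere in this project's configs):

```python
import numpy as np

# im_info must carry an input_shape slot for DetResize to update
im_info = {"input_shape": np.array([3, 416, 416])}
im, im_info = decode_image("test.jpg", im_info)  # HWC, RGB

ops = [
    DetResize(target_size=416, keep_ratio=False),
    DetNormalizeImage(mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225],
                      is_scale=True),
    DetPermute(),
]
im, im_info = det_preprocess(im, im_info, ops)  # CHW float32
print(im.shape, im_info["scale_factor"])        # (3, 416, 416) [scale_y scale_x]
```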
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/postprocess.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/postprocess.py
new file mode 100644
index 000000000..d26cbaa9a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/postprocess.py
@@ -0,0 +1,161 @@
+# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import copy
+import shutil
+from functools import partial
+import importlib
+import numpy as np
+import paddle
+import paddle.nn.functional as F
+
+
+def build_postprocess(config):
+ if config is None:
+ return None
+
+ mod = importlib.import_module(__name__)
+ config = copy.deepcopy(config)
+
+ main_indicator = config.pop(
+ "main_indicator") if "main_indicator" in config else None
+ main_indicator = main_indicator if main_indicator else ""
+
+ func_list = []
+ for func in config:
+ func_list.append(getattr(mod, func)(**config[func]))
+ return PostProcesser(func_list, main_indicator)
+
+
+class PostProcesser(object):
+ def __init__(self, func_list, main_indicator="Topk"):
+ self.func_list = func_list
+ self.main_indicator = main_indicator
+
+ def __call__(self, x, image_file=None):
+ rtn = None
+ for func in self.func_list:
+ tmp = func(x, image_file)
+ if type(func).__name__ in self.main_indicator:
+ rtn = tmp
+ return rtn
+
+
+class Topk(object):
+ def __init__(self, topk=1, class_id_map_file=None):
+ assert isinstance(topk, (int, ))
+ self.class_id_map = self.parse_class_id_map(class_id_map_file)
+ self.topk = topk
+
+ def parse_class_id_map(self, class_id_map_file):
+ if class_id_map_file is None:
+ return None
+
+ if not os.path.exists(class_id_map_file):
+ print(
+ "Warning: If want to use your own label_dict, please input legal path!\nOtherwise label_names will be empty!"
+ )
+ return None
+
+ try:
+ class_id_map = {}
+ with open(class_id_map_file, "r") as fin:
+ lines = fin.readlines()
+ for line in lines:
+ partition = line.split("\n")[0].partition(" ")
+ class_id_map[int(partition[0])] = str(partition[-1])
+ except Exception as ex:
+ print(ex)
+ class_id_map = None
+ return class_id_map
+
+ def __call__(self, x, file_names=None, multilabel=False):
+ if file_names is not None:
+ assert x.shape[0] == len(file_names)
+ y = []
+ for idx, probs in enumerate(x):
+ index = probs.argsort(axis=0)[-self.topk:][::-1].astype(
+ "int32") if not multilabel else np.where(
+ probs >= 0.5)[0].astype("int32")
+ clas_id_list = []
+ score_list = []
+ label_name_list = []
+ for i in index:
+ clas_id_list.append(i.item())
+ score_list.append(probs[i].item())
+ if self.class_id_map is not None:
+ label_name_list.append(self.class_id_map[i.item()])
+ result = {
+ "class_ids": clas_id_list,
+ "scores": np.around(
+ score_list, decimals=5).tolist(),
+ }
+ if file_names is not None:
+ result["file_name"] = file_names[idx]
+ if label_name_list is not None:
+ result["label_names"] = label_name_list
+ y.append(result)
+ return y
+
+
+class MultiLabelTopk(Topk):
+ def __init__(self, topk=1, class_id_map_file=None):
+ super().__init__()
+
+ def __call__(self, x, file_names=None):
+ return super().__call__(x, file_names, multilabel=True)
+
+
+class SavePreLabel(object):
+ def __init__(self, save_dir):
+ if save_dir is None:
+ raise Exception(
+ "Please specify save_dir if SavePreLabel specified.")
+ self.save_dir = partial(os.path.join, save_dir)
+
+ def __call__(self, x, file_names=None):
+ if file_names is None:
+ return
+ assert x.shape[0] == len(file_names)
+ for idx, probs in enumerate(x):
+ index = probs.argsort(axis=0)[-1].astype("int32")
+ self.save(index, file_names[idx])
+
+ def save(self, id, image_file):
+ output_dir = self.save_dir(str(id))
+ os.makedirs(output_dir, exist_ok=True)
+ shutil.copy(image_file, output_dir)
+
+
+class Binarize(object):
+ def __init__(self, method="round"):
+ self.method = method
+ self.unit = np.array([[128, 64, 32, 16, 8, 4, 2, 1]]).T
+
+ def __call__(self, x, file_names=None):
+ if self.method == "round":
+ x = np.round(x + 1).astype("uint8") - 1
+
+ if self.method == "sign":
+ x = ((np.sign(x) + 1) / 2).astype("uint8")
+
+ embedding_size = x.shape[1]
+        assert embedding_size % 8 == 0, "The binary index only supports vectors whose size is a multiple of 8"
+
+ byte = np.zeros([x.shape[0], embedding_size // 8], dtype=np.uint8)
+ for i in range(embedding_size // 8):
+ byte[:, i:i + 1] = np.dot(x[:, i * 8:(i + 1) * 8], self.unit)
+
+ return byte
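
A short usage sketch for the `Topk` postprocessor on a dummy batch of probabilities (no `class_id_map_file` is given, so only class ids and scores are filled in):

```python
import numpy as np

topk = Topk(topk=3)
# fake softmax outputs for a batch of two samples over five classes
probs = np.array([[0.05, 0.10, 0.60, 0.20, 0.05],
                  [0.70, 0.10, 0.05, 0.10, 0.05]])
for result in topk(probs):
    print(result["class_ids"], result["scores"])
# first row -> [2, 3, 1] [0.6, 0.2, 0.1]
```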
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_cls.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_cls.py
new file mode 100644
index 000000000..cdeb32e48
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_cls.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import cv2
+import numpy as np
+
+from utils import logger
+from utils import config
+from utils.predictor import Predictor
+from utils.get_image_list import get_image_list
+from python.preprocess import create_operators
+from python.postprocess import build_postprocess
+
+
+class ClsPredictor(Predictor):
+ def __init__(self, config):
+ super().__init__(config["Global"])
+
+ self.preprocess_ops = []
+ self.postprocess = None
+ if "PreProcess" in config:
+ if "transform_ops" in config["PreProcess"]:
+ self.preprocess_ops = create_operators(config["PreProcess"][
+ "transform_ops"])
+ if "PostProcess" in config:
+ self.postprocess = build_postprocess(config["PostProcess"])
+
+        # benchmark logging, used by the whole_chain project to test each Paddle repo
+ self.benchmark = config["Global"].get("benchmark", False)
+ if self.benchmark:
+            import auto_log
+ pid = os.getpid()
+ self.auto_logger = auto_log.AutoLogger(
+ model_name=config["Global"].get("model_name", "cls"),
+ model_precision='fp16'
+ if config["Global"]["use_fp16"] else 'fp32',
+ batch_size=config["Global"].get("batch_size", 1),
+ data_shape=[3, 224, 224],
+ save_path=config["Global"].get("save_log_path",
+ "./auto_log.log"),
+ inference_config=self.config,
+ pids=pid,
+ process_name=None,
+ gpu_ids=None,
+ time_keys=[
+ 'preprocess_time', 'inference_time', 'postprocess_time'
+ ],
+ warmup=2)
+
+ def predict(self, images):
+ input_names = self.paddle_predictor.get_input_names()
+ input_tensor = self.paddle_predictor.get_input_handle(input_names[0])
+
+ output_names = self.paddle_predictor.get_output_names()
+ output_tensor = self.paddle_predictor.get_output_handle(output_names[
+ 0])
+ if self.benchmark:
+ self.auto_logger.times.start()
+ if not isinstance(images, (list, )):
+ images = [images]
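+        # Apply the configured preprocess ops to every image, then stack the
+        # results into one batch array.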
+ for idx in range(len(images)):
+ for ops in self.preprocess_ops:
+ images[idx] = ops(images[idx])
+ image = np.array(images)
+ if self.benchmark:
+ self.auto_logger.times.stamp()
+
+ input_tensor.copy_from_cpu(image)
+ self.paddle_predictor.run()
+ batch_output = output_tensor.copy_to_cpu()
+ if self.benchmark:
+ self.auto_logger.times.stamp()
+ if self.postprocess is not None:
+ batch_output = self.postprocess(batch_output)
+ if self.benchmark:
+ self.auto_logger.times.end(stamp=True)
+ return batch_output
+
+
+def main(config):
+ cls_predictor = ClsPredictor(config)
+ image_list = get_image_list(config["Global"]["infer_imgs"])
+
+ batch_imgs = []
+ batch_names = []
+ cnt = 0
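+    # Accumulate images until a full batch is collected (or the list ends),
+    # then predict the whole batch at once.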
+ for idx, img_path in enumerate(image_list):
+ img = cv2.imread(img_path)
+ if img is None:
+            logger.warning(
+                "Image file could not be read and has been skipped. Path: {}".
+                format(img_path))
+ else:
+ img = img[:, :, ::-1]
+ batch_imgs.append(img)
+ img_name = os.path.basename(img_path)
+ batch_names.append(img_name)
+ cnt += 1
+
+        if cnt % config["Global"]["batch_size"] == 0 or (
+                idx + 1) == len(image_list):
+ if len(batch_imgs) == 0:
+ continue
+ batch_results = cls_predictor.predict(batch_imgs)
+ for number, result_dict in enumerate(batch_results):
+ filename = batch_names[number]
+ clas_ids = result_dict["class_ids"]
+ scores_str = "[{}]".format(", ".join("{:.2f}".format(
+ r) for r in result_dict["scores"]))
+ label_names = result_dict["label_names"]
+ print("{}:\tclass id(s): {}, score(s): {}, label_name(s): {}".
+ format(filename, clas_ids, scores_str, label_names))
+ batch_imgs = []
+ batch_names = []
+ if cls_predictor.benchmark:
+ cls_predictor.auto_logger.report()
+ return
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ main(config)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_det.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_det.py
new file mode 100644
index 000000000..0b9c25a5a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_det.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+from utils import logger
+from utils import config
+from utils.predictor import Predictor
+from utils.get_image_list import get_image_list
+from det_preprocess import det_preprocess
+from preprocess import create_operators
+from utils.draw_bbox import draw_bbox_results
+
+import argparse
+import time
+import yaml
+import ast
+from functools import reduce
+import cv2
+import numpy as np
+import paddle
+import requests
+import base64
+import json
+
+
+class DetPredictor(Predictor):
+ def __init__(self, config):
+ super().__init__(config["Global"],
+ config["Global"]["det_inference_model_dir"])
+
+ self.preprocess_ops = create_operators(config["DetPreProcess"][
+ "transform_ops"])
+ self.config = config
+
+ def preprocess(self, img):
+        im_info = {
+            'scale_factor': np.array(
+                [1., 1.], dtype=np.float32),
+            'im_shape': np.array(
+                img.shape[:2], dtype=np.float32),
+            'input_shape': self.config["Global"]["image_shape"],
+        }
+
+ im, im_info = det_preprocess(img, im_info, self.preprocess_ops)
+ inputs = self.create_inputs(im, im_info)
+ return inputs
+
+ def create_inputs(self, im, im_info):
+ """generate input for different model type
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ model_arch (str): model type
+ Returns:
+ inputs (dict): input of model
+ """
+ inputs = {}
+ inputs['image'] = np.array((im, )).astype('float32')
+ inputs['im_shape'] = np.array(
+ (im_info['im_shape'], )).astype('float32')
+ inputs['scale_factor'] = np.array(
+ (im_info['scale_factor'], )).astype('float32')
+ return inputs
+
+ def parse_det_results(self, pred, threshold, label_list):
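+        # Each row of pred is [class_id, score, x_min, y_min, x_max, y_max]; keep
+        # at most max_det_results rows by score, then drop rows below threshold.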
+ max_det_results = self.config["Global"]["max_det_results"]
+ keep_indexes = pred[:, 1].argsort()[::-1][:max_det_results]
+ results = []
+ for idx in keep_indexes:
+ single_res = pred[idx]
+ class_id = int(single_res[0])
+ score = single_res[1]
+ bbox = single_res[2:]
+ if score < threshold:
+ continue
+ label_name = label_list[class_id]
+ results.append({
+ "bbox": bbox,
+ "rec_docs": "background",
+ "rec_scores": score,
+ })
+ return results
+
+ def predict(self, image, threshold=0.5, run_benchmark=False):
+ '''
+ Args:
+            image (str/np.ndarray): path of image / np.ndarray read by cv2
+            threshold (float): threshold of the predicted box's score
+        Returns:
+            results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of boxes,
+                            matrix element:[class, score, x_min, y_min, x_max, y_max]
+ MaskRCNN's results include 'masks': np.ndarray:
+ shape: [N, im_h, im_w]
+ '''
+ inputs = self.preprocess(image)
+        np_boxes = None
+        input_names = self.paddle_predictor.get_input_names()
+ for i in range(len(input_names)):
+ input_tensor = self.paddle_predictor.get_input_handle(input_names[
+ i])
+ input_tensor.copy_from_cpu(inputs[input_names[i]])
+        t1 = time.time()
+        self.paddle_predictor.run()
+ output_names = self.paddle_predictor.get_output_names()
+ boxes_tensor = self.paddle_predictor.get_output_handle(output_names[0])
+
+ np_boxes = boxes_tensor.copy_to_cpu()
+ t2 = time.time()
+
+ print("Inference: {} ms per batch image".format((t2 - t1) * 1000.0))
+
+ # do not perform postprocess in benchmark mode
+ results = []
+ if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
+            print('[WARNING] No object detected.')
+ results = np.array([])
+ else:
+ results = np_boxes
+
+ results = self.parse_det_results(results,
+ self.config["Global"]["threshold"],
+ self.config["Global"]["labe_list"])
+ return results
+
+
+def main(config):
+ det_predictor = DetPredictor(config)
+ image_list = get_image_list(config["Global"]["infer_imgs"])
+
+ assert config["Global"]["batch_size"] == 1
+ for idx, image_file in enumerate(image_list):
+ img = cv2.imread(image_file)[:, :, ::-1]
+ output = det_predictor.predict(img)
+ print(output)
+ draw_bbox_results(img, output, image_file)
+
+    return image_file, output
+
+def cv2_to_base64_img(img):
+    data = cv2.imencode('.jpg', img)[1]
+    # tobytes() replaces the deprecated tostring()
+    return base64.b64encode(data.tobytes()).decode('utf8')
+
+def solve_output(output, image_file):
+    print(image_file)
+    img = cv2.imread(image_file)
+
+    for bbox in output:
+        left, top, right, bottom = (int(bbox["bbox"][0]), int(bbox["bbox"][1]),
+                                    int(bbox["bbox"][2]), int(bbox["bbox"][3]))
+        print(left, top, right, bottom)
+        img_crop = img[top:bottom, left:right]
+        url = "http://123.157.241.94:36807/ppyolo_mbv3/prediction"
+        img2 = {"key": ["image"], "value": [cv2_to_base64_img(img_crop)]}
+        r = requests.post(url=url, data=json.dumps(img2), timeout=5)
+        r = r.json()
+        print(r)
+        result = eval(r['value'][0])[0]
+        cv2.putText(img, str(round(float(result["scores"][0]), 2)),
+                    (left, top + 30), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
+        cv2.putText(img, str(result["label_names"][0]), (left, top + 60),
+                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
+        cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
+    cv2.imwrite("./output/ppyolo_result" + image_file[image_file.rfind("/"):], img)
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ image_file,output = main(config)
+ #solve_output(output,image_file)
+
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_det_bak.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_det_bak.py
new file mode 100644
index 000000000..323d65ab1
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_det_bak.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+from utils import logger
+from utils import config
+from utils.predictor import Predictor
+from utils.get_image_list import get_image_list
+from det_preprocess import det_preprocess
+from preprocess import create_operators
+from utils.draw_bbox import draw_bbox_results
+
+import argparse
+import time
+import yaml
+import ast
+from functools import reduce
+import cv2
+import numpy as np
+import paddle
+
+
+class DetPredictor(Predictor):
+ def __init__(self, config):
+ super().__init__(config["Global"],
+ config["Global"]["det_inference_model_dir"])
+
+ self.preprocess_ops = create_operators(config["DetPreProcess"][
+ "transform_ops"])
+ self.config = config
+
+ def preprocess(self, img):
+        im_info = {
+            'scale_factor': np.array(
+                [1., 1.], dtype=np.float32),
+            'im_shape': np.array(
+                img.shape[:2], dtype=np.float32),
+            'input_shape': self.config["Global"]["image_shape"],
+        }
+ im, im_info = det_preprocess(img, im_info, self.preprocess_ops)
+ inputs = self.create_inputs(im, im_info)
+ return inputs
+
+ def create_inputs(self, im, im_info):
+ """generate input for different model type
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ model_arch (str): model type
+ Returns:
+ inputs (dict): input of model
+ """
+ inputs = {}
+ inputs['image'] = np.array((im, )).astype('float32')
+ inputs['im_shape'] = np.array(
+ (im_info['im_shape'], )).astype('float32')
+ inputs['scale_factor'] = np.array(
+ (im_info['scale_factor'], )).astype('float32')
+ return inputs
+
+ def parse_det_results(self, pred, threshold, label_list):
+ max_det_results = self.config["Global"]["max_det_results"]
+ keep_indexes = pred[:, 1].argsort()[::-1][:max_det_results]
+ results = []
+ for idx in keep_indexes:
+ single_res = pred[idx]
+ class_id = int(single_res[0])
+ score = single_res[1]
+ bbox = single_res[2:]
+ if score < threshold:
+ continue
+ label_name = label_list[class_id]
+ results.append({
+ "bbox": bbox,
+ "rec_docs": "background",
+ "rec_scores": score,
+ })
+ return results
+
+ def predict(self, image, threshold=0.5, run_benchmark=False):
+ '''
+ Args:
+            image (str/np.ndarray): path of image / np.ndarray read by cv2
+            threshold (float): threshold of the predicted box's score
+        Returns:
+            results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of boxes,
+                            matrix element:[class, score, x_min, y_min, x_max, y_max]
+ MaskRCNN's results include 'masks': np.ndarray:
+ shape: [N, im_h, im_w]
+ '''
+ inputs = self.preprocess(image)
+ np_boxes = None
+ input_names = self.paddle_predictor.get_input_names()
+
+ for i in range(len(input_names)):
+ input_tensor = self.paddle_predictor.get_input_handle(input_names[
+ i])
+ input_tensor.copy_from_cpu(inputs[input_names[i]])
+
+ t1 = time.time()
+ self.paddle_predictor.run()
+ output_names = self.paddle_predictor.get_output_names()
+ boxes_tensor = self.paddle_predictor.get_output_handle(output_names[0])
+ np_boxes = boxes_tensor.copy_to_cpu()
+ t2 = time.time()
+
+ print("Inference: {} ms per batch image".format((t2 - t1) * 1000.0))
+
+ # do not perform postprocess in benchmark mode
+ results = []
+ if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
+            print('[WARNING] No object detected.')
+ results = np.array([])
+ else:
+ results = np_boxes
+
+ results = self.parse_det_results(results,
+ self.config["Global"]["threshold"],
+ self.config["Global"]["labe_list"])
+ return results
+
+
+def main(config):
+ det_predictor = DetPredictor(config)
+ image_list = get_image_list(config["Global"]["infer_imgs"])
+
+ assert config["Global"]["batch_size"] == 1
+ for idx, image_file in enumerate(image_list):
+ img = cv2.imread(image_file)[:, :, ::-1]
+ output = det_predictor.predict(img)
+ print(output)
+ draw_bbox_results(img, output, image_file)
+
+ return
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ main(config)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_rec.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_rec.py
new file mode 100644
index 000000000..d41c513f8
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_rec.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import cv2
+import numpy as np
+
+from utils import logger
+from utils import config
+from utils.predictor import Predictor
+from utils.get_image_list import get_image_list
+from preprocess import create_operators
+from postprocess import build_postprocess
+
+
+class RecPredictor(Predictor):
+ def __init__(self, config):
+ super().__init__(config["Global"],
+ config["Global"]["rec_inference_model_dir"])
+ self.preprocess_ops = create_operators(config["RecPreProcess"][
+ "transform_ops"])
+ self.postprocess = build_postprocess(config["RecPostProcess"])
+
+ def predict(self, images, feature_normalize=True):
+ input_names = self.paddle_predictor.get_input_names()
+ input_tensor = self.paddle_predictor.get_input_handle(input_names[0])
+
+ output_names = self.paddle_predictor.get_output_names()
+ output_tensor = self.paddle_predictor.get_output_handle(output_names[
+ 0])
+
+ if not isinstance(images, (list, )):
+ images = [images]
+ for idx in range(len(images)):
+ for ops in self.preprocess_ops:
+ images[idx] = ops(images[idx])
+ image = np.array(images)
+
+ input_tensor.copy_from_cpu(image)
+ self.paddle_predictor.run()
+ batch_output = output_tensor.copy_to_cpu()
+
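+        # L2-normalize each feature vector so retrieval can use inner product as
+        # cosine similarity.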
+ if feature_normalize:
+ feas_norm = np.sqrt(
+ np.sum(np.square(batch_output), axis=1, keepdims=True))
+ batch_output = np.divide(batch_output, feas_norm)
+
+ if self.postprocess is not None:
+ batch_output = self.postprocess(batch_output)
+ return batch_output
+
+
+def main(config):
+ rec_predictor = RecPredictor(config)
+ image_list = get_image_list(config["Global"]["infer_imgs"])
+
+ batch_imgs = []
+ batch_names = []
+ cnt = 0
+ for idx, img_path in enumerate(image_list):
+ img = cv2.imread(img_path)
+ if img is None:
+            logger.warning(
+                "Image file could not be read and has been skipped. Path: {}".
+                format(img_path))
+ else:
+ img = img[:, :, ::-1]
+ batch_imgs.append(img)
+ img_name = os.path.basename(img_path)
+ batch_names.append(img_name)
+ cnt += 1
+
+ if cnt % config["Global"]["batch_size"] == 0 or (idx + 1) == len(image_list):
+ if len(batch_imgs) == 0:
+ continue
+
+ batch_results = rec_predictor.predict(batch_imgs)
+ for number, result_dict in enumerate(batch_results):
+ filename = batch_names[number]
+ print("{}:\t{}".format(filename, result_dict))
+ batch_imgs = []
+ batch_names = []
+
+ return
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ main(config)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_system.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_system.py
new file mode 100644
index 000000000..fb2d66a53
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/predict_system.py
@@ -0,0 +1,145 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import copy
+import cv2
+import numpy as np
+import faiss
+import pickle
+
+from python.predict_rec import RecPredictor
+from python.predict_det import DetPredictor
+
+from utils import logger
+from utils import config
+from utils.get_image_list import get_image_list
+from utils.draw_bbox import draw_bbox_results
+
+
+class SystemPredictor(object):
+ def __init__(self, config):
+ self.config = config
+ self.rec_predictor = RecPredictor(config)
+ self.det_predictor = DetPredictor(config)
+
+ assert 'IndexProcess' in config.keys(), "Index config not found ... "
+ self.return_k = self.config['IndexProcess']['return_k']
+
+ index_dir = self.config["IndexProcess"]["index_dir"]
+ assert os.path.exists(os.path.join(
+ index_dir, "vector.index")), "vector.index not found ..."
+ assert os.path.exists(os.path.join(
+ index_dir, "id_map.pkl")), "id_map.pkl not found ... "
+
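+        # The gallery index may be a binary (Hamming) index or a regular float
+        # index; the binary_index flag selects the matching faiss loader.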
+ if config['IndexProcess'].get("binary_index", False):
+ self.Searcher = faiss.read_index_binary(
+ os.path.join(index_dir, "vector.index"))
+ else:
+ self.Searcher = faiss.read_index(
+ os.path.join(index_dir, "vector.index"))
+
+ with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd:
+ self.id_map = pickle.load(fd)
+
+ def append_self(self, results, shape):
+ results.append({
+ "class_id": 0,
+ "score": 1.0,
+ "bbox":
+ np.array([0, 0, shape[1], shape[0]]), # xmin, ymin, xmax, ymax
+ "label_name": "foreground",
+ })
+ return results
+
+ def nms_to_rec_results(self, results, thresh=0.1):
+ filtered_results = []
+ x1 = np.array([r["bbox"][0] for r in results]).astype("float32")
+ y1 = np.array([r["bbox"][1] for r in results]).astype("float32")
+ x2 = np.array([r["bbox"][2] for r in results]).astype("float32")
+ y2 = np.array([r["bbox"][3] for r in results]).astype("float32")
+ scores = np.array([r["rec_scores"] for r in results])
+
+ areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+ order = scores.argsort()[::-1]
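+        # Greedy NMS: repeatedly keep the highest-scoring remaining box and drop
+        # all boxes whose IoU with it exceeds thresh.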
+ while order.size > 0:
+ i = order[0]
+ xx1 = np.maximum(x1[i], x1[order[1:]])
+ yy1 = np.maximum(y1[i], y1[order[1:]])
+ xx2 = np.minimum(x2[i], x2[order[1:]])
+ yy2 = np.minimum(y2[i], y2[order[1:]])
+
+ w = np.maximum(0.0, xx2 - xx1 + 1)
+ h = np.maximum(0.0, yy2 - yy1 + 1)
+ inter = w * h
+ ovr = inter / (areas[i] + areas[order[1:]] - inter)
+ inds = np.where(ovr <= thresh)[0]
+ order = order[inds + 1]
+ filtered_results.append(results[i])
+
+ return filtered_results
+
+ def predict(self, img):
+ output = []
+ # st1: get all detection results
+ results = self.det_predictor.predict(img)
+
+ # st2: add the whole image for recognition to improve recall
+ results = self.append_self(results, img.shape)
+
+ # st3: recognition process, use score_thres to ensure accuracy
+ for result in results:
+ preds = {}
+ xmin, ymin, xmax, ymax = result["bbox"].astype("int")
+ crop_img = img[ymin:ymax, xmin:xmax, :].copy()
+ rec_results = self.rec_predictor.predict(crop_img)
+ preds["bbox"] = [xmin, ymin, xmax, ymax]
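+            # Search the faiss gallery index with the crop's feature; scores and
+            # docs hold the top return_k similarities and gallery ids.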
+ scores, docs = self.Searcher.search(rec_results, self.return_k)
+
+            # only the top-1 retrieval result is kept for the final output
+ if scores[0][0] >= self.config["IndexProcess"]["score_thres"]:
+ preds["rec_docs"] = self.id_map[docs[0][0]].split()[1]
+ preds["rec_scores"] = scores[0][0]
+ output.append(preds)
+
+        # st4: apply nms to the final results to avoid duplicate results
+ output = self.nms_to_rec_results(
+ output, self.config["Global"]["rec_nms_thresold"])
+
+ return output
+
+
+def main(config):
+ system_predictor = SystemPredictor(config)
+ image_list = get_image_list(config["Global"]["infer_imgs"])
+
+ assert config["Global"]["batch_size"] == 1
+ for idx, image_file in enumerate(image_list):
+ img = cv2.imread(image_file)[:, :, ::-1]
+ output = system_predictor.predict(img)
+ print(image_file)
+ draw_bbox_results(img, output, image_file)
+ print(output)
+ return
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ main(config)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/preprocess.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/preprocess.py
new file mode 100644
index 000000000..c4b6bca30
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/preprocess.py
@@ -0,0 +1,337 @@
+"""
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from functools import partial
+import six
+import math
+import random
+import cv2
+import numpy as np
+import importlib
+from PIL import Image
+
+from det_preprocess import DetNormalizeImage, DetPadStride, DetPermute, DetResize
+# Assumption: the same utils package used by the sibling predict_*.py scripts is
+# importable here; logger is needed by UnifiedResize's backend fallback below.
+from utils import logger
+
+
+def create_operators(params):
+ """
+ create operators based on the config
+
+ Args:
+ params(list): a dict list, used to create some operators
+ """
+ assert isinstance(params, list), ('operator config should be a list')
+ mod = importlib.import_module(__name__)
+ ops = []
+ for operator in params:
+ assert isinstance(operator,
+ dict) and len(operator) == 1, "yaml format error"
+ op_name = list(operator)[0]
+ param = {} if operator[op_name] is None else operator[op_name]
+ op = getattr(mod, op_name)(**param)
+ ops.append(op)
+
+ return ops
+
+
+class UnifiedResize(object):
+ def __init__(self, interpolation=None, backend="cv2"):
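+        # Map a string interpolation name to the matching cv2/PIL constant and
+        # build a single resize callable, so callers are independent of backend.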
+ _cv2_interp_from_str = {
+ 'nearest': cv2.INTER_NEAREST,
+ 'bilinear': cv2.INTER_LINEAR,
+ 'area': cv2.INTER_AREA,
+ 'bicubic': cv2.INTER_CUBIC,
+ 'lanczos': cv2.INTER_LANCZOS4
+ }
+ _pil_interp_from_str = {
+ 'nearest': Image.NEAREST,
+ 'bilinear': Image.BILINEAR,
+ 'bicubic': Image.BICUBIC,
+ 'box': Image.BOX,
+ 'lanczos': Image.LANCZOS,
+ 'hamming': Image.HAMMING
+ }
+
+ def _pil_resize(src, size, resample):
+ pil_img = Image.fromarray(src)
+ pil_img = pil_img.resize(size, resample)
+ return np.asarray(pil_img)
+
+ if backend.lower() == "cv2":
+ if isinstance(interpolation, str):
+ interpolation = _cv2_interp_from_str[interpolation.lower()]
+ # compatible with opencv < version 4.4.0
+ elif interpolation is None:
+ interpolation = cv2.INTER_LINEAR
+ self.resize_func = partial(cv2.resize, interpolation=interpolation)
+ elif backend.lower() == "pil":
+ if isinstance(interpolation, str):
+ interpolation = _pil_interp_from_str[interpolation.lower()]
+ self.resize_func = partial(_pil_resize, resample=interpolation)
+ else:
+ logger.warning(
+                f"The backend of Resize only supports \"cv2\" or \"PIL\". \"{backend}\" is unavailable. Using \"cv2\" instead."
+ )
+ self.resize_func = cv2.resize
+
+ def __call__(self, src, size):
+ return self.resize_func(src, size)
+
+
+class OperatorParamError(ValueError):
+ """ OperatorParamError
+ """
+ pass
+
+
+class DecodeImage(object):
+ """ decode image """
+
+ def __init__(self, to_rgb=True, to_np=False, channel_first=False):
+ self.to_rgb = to_rgb
+ self.to_np = to_np # to numpy
+ self.channel_first = channel_first # only enabled when to_np is True
+
+ def __call__(self, img):
+ if six.PY2:
+ assert type(img) is str and len(
+ img) > 0, "invalid input 'img' in DecodeImage"
+ else:
+ assert type(img) is bytes and len(
+ img) > 0, "invalid input 'img' in DecodeImage"
+ data = np.frombuffer(img, dtype='uint8')
+ img = cv2.imdecode(data, 1)
+ if self.to_rgb:
+ assert img.shape[2] == 3, 'invalid shape of image[%s]' % (
+ img.shape)
+ img = img[:, :, ::-1]
+
+ if self.channel_first:
+ img = img.transpose((2, 0, 1))
+
+ return img
+
+
+class ResizeImage(object):
+ """ resize image """
+
+ def __init__(self,
+ size=None,
+ resize_short=None,
+ interpolation=None,
+ backend="cv2"):
+ if resize_short is not None and resize_short > 0:
+ self.resize_short = resize_short
+ self.w = None
+ self.h = None
+ elif size is not None:
+ self.resize_short = None
+ self.w = size if type(size) is int else size[0]
+ self.h = size if type(size) is int else size[1]
+ else:
+            raise OperatorParamError("invalid params for ResizeImage: "
+                                     "both 'size' and 'resize_short' are None")
+
+ self._resize_func = UnifiedResize(
+ interpolation=interpolation, backend=backend)
+
+ def __call__(self, img):
+ img_h, img_w = img.shape[:2]
+ if self.resize_short is not None:
+ percent = float(self.resize_short) / min(img_w, img_h)
+ w = int(round(img_w * percent))
+ h = int(round(img_h * percent))
+ else:
+ w = self.w
+ h = self.h
+ return self._resize_func(img, (w, h))
+
+
+class CropImage(object):
+ """ crop image """
+
+ def __init__(self, size):
+ if type(size) is int:
+ self.size = (size, size)
+ else:
+ self.size = size # (h, w)
+
+ def __call__(self, img):
+ w, h = self.size
+ img_h, img_w = img.shape[:2]
+
+ if img_h < h or img_w < w:
+            raise Exception(
+                f"The size({h}, {w}) of CropImage must not be greater than the size({img_h}, {img_w}) of the image. Please check the original image size and the size of ResizeImage if used."
+            )
+
+ w_start = (img_w - w) // 2
+ h_start = (img_h - h) // 2
+
+ w_end = w_start + w
+ h_end = h_start + h
+ return img[h_start:h_end, w_start:w_end, :]
+
+
+class RandCropImage(object):
+ """ random crop image """
+
+ def __init__(self,
+ size,
+ scale=None,
+ ratio=None,
+ interpolation=None,
+ backend="cv2"):
+ if type(size) is int:
+ self.size = (size, size) # (h, w)
+ else:
+ self.size = size
+
+ self.scale = [0.08, 1.0] if scale is None else scale
+ self.ratio = [3. / 4., 4. / 3.] if ratio is None else ratio
+
+ self._resize_func = UnifiedResize(
+ interpolation=interpolation, backend=backend)
+
+ def __call__(self, img):
+ size = self.size
+ scale = self.scale
+ ratio = self.ratio
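+        # Sample an aspect ratio and target area (bounded so the crop fits inside
+        # the image), take a random crop of that shape, then resize it to `size`.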
+
+ aspect_ratio = math.sqrt(random.uniform(*ratio))
+ w = 1. * aspect_ratio
+ h = 1. / aspect_ratio
+
+ img_h, img_w = img.shape[:2]
+
+ bound = min((float(img_w) / img_h) / (w**2),
+ (float(img_h) / img_w) / (h**2))
+ scale_max = min(scale[1], bound)
+ scale_min = min(scale[0], bound)
+
+ target_area = img_w * img_h * random.uniform(scale_min, scale_max)
+ target_size = math.sqrt(target_area)
+ w = int(target_size * w)
+ h = int(target_size * h)
+
+ i = random.randint(0, img_w - w)
+ j = random.randint(0, img_h - h)
+
+ img = img[j:j + h, i:i + w, :]
+
+ return self._resize_func(img, size)
+
+
+class RandFlipImage(object):
+ """ random flip image
+ flip_code:
+ 1: Flipped Horizontally
+ 0: Flipped Vertically
+ -1: Flipped Horizontally & Vertically
+ """
+
+ def __init__(self, flip_code=1):
+ assert flip_code in [-1, 0, 1
+ ], "flip_code should be a value in [-1, 0, 1]"
+ self.flip_code = flip_code
+
+ def __call__(self, img):
+ if random.randint(0, 1) == 1:
+ return cv2.flip(img, self.flip_code)
+ else:
+ return img
+
+
+class AutoAugment(object):
+ def __init__(self):
+        # NOTE: ImageNetPolicy is not defined or imported in this file; it must
+        # come from an autoaugment module for this operator to be usable.
+        self.policy = ImageNetPolicy()
+
+ def __call__(self, img):
+ from PIL import Image
+ img = np.ascontiguousarray(img)
+ img = Image.fromarray(img)
+ img = self.policy(img)
+        img = np.asarray(img)
+        return img
+
+
+class NormalizeImage(object):
+    """ normalize image, e.g. subtract the mean and divide by the std
+ """
+
+ def __init__(self,
+ scale=None,
+ mean=None,
+ std=None,
+ order='chw',
+ output_fp16=False,
+ channel_num=3):
+ if isinstance(scale, str):
+ scale = eval(scale)
+ assert channel_num in [
+ 3, 4
+ ], "channel number of input image should be set to 3 or 4."
+ self.channel_num = channel_num
+ self.output_dtype = 'float16' if output_fp16 else 'float32'
+ self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
+ self.order = order
+ mean = mean if mean is not None else [0.485, 0.456, 0.406]
+ std = std if std is not None else [0.229, 0.224, 0.225]
+
+ shape = (3, 1, 1) if self.order == 'chw' else (1, 1, 3)
+ self.mean = np.array(mean).reshape(shape).astype('float32')
+ self.std = np.array(std).reshape(shape).astype('float32')
+
+ def __call__(self, img):
+ from PIL import Image
+ if isinstance(img, Image.Image):
+ img = np.array(img)
+
+ assert isinstance(img,
+ np.ndarray), "invalid input 'img' in NormalizeImage"
+
+ img = (img.astype('float32') * self.scale - self.mean) / self.std
+
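+        # Some inference deployments expect 4-channel input; pad a zero channel
+        # when channel_num is 4.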
+ if self.channel_num == 4:
+ img_h = img.shape[1] if self.order == 'chw' else img.shape[0]
+ img_w = img.shape[2] if self.order == 'chw' else img.shape[1]
+ pad_zeros = np.zeros(
+ (1, img_h, img_w)) if self.order == 'chw' else np.zeros(
+ (img_h, img_w, 1))
+ img = (np.concatenate(
+ (img, pad_zeros), axis=0)
+ if self.order == 'chw' else np.concatenate(
+ (img, pad_zeros), axis=2))
+ return img.astype(self.output_dtype)
+
+
+class ToCHWImage(object):
+ """ convert hwc image to chw image
+ """
+
+ def __init__(self):
+ pass
+
+ def __call__(self, img):
+ from PIL import Image
+ if isinstance(img, Image.Image):
+ img = np.array(img)
+
+ return img.transpose((2, 0, 1))
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/.recognition_web_service_onlyrec.py.swp b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/.recognition_web_service_onlyrec.py.swp
new file mode 100644
index 000000000..3fbc33b04
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/.recognition_web_service_onlyrec.py.swp differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/PipelineServingLogs/pipeline.log b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/PipelineServingLogs/pipeline.log
new file mode 100644
index 000000000..2818e7bd5
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/PipelineServingLogs/pipeline.log
@@ -0,0 +1,2665 @@
+WARNING 2022-02-17 11:39:15,975 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 11:39:15,995 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 2, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 11:39:15,995 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 11:39:15,995 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 11:39:15,995 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 11:39:15,995 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":2,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 11:39:15,995 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 11:39:15,995 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 11:39:16,008 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 11:39:16,009 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 11:39:16,009 [dag.py:662] rec
+INFO 2022-02-17 11:39:16,009 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 11:39:16,009 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 11:39:16,009 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 11:39:16,009 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 11:39:16,009 [dag.py:686] - rec
+INFO 2022-02-17 11:39:16,009 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 11:39:16,009 [dag.py:684] [rec]
+INFO 2022-02-17 11:39:16,009 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 11:39:16,075 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 11:39:16,084 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 11:39:16,085 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 11:39:16,089 [dag.py:832] [DAG] start
+INFO 2022-02-17 11:39:16,090 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 11:39:16,092 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 11:39:16,099 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 11:39:16,099 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 11:39:16,099 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 11:39:17,129 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 11:39:19,011 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 11:39:25,739 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645069165.7397573
+INFO 2022-02-17 11:39:25,740 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645069165.7409468
+INFO 2022-02-17 11:39:25,741 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2022-02-17 11:39:28,512 [operator.py:1109] (data_id=0 log_id=0) [rec|0] Failed to postprocess: postprocess() takes 4 positional arguments but 5 were given
+Traceback (most recent call last):
+ File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1105, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: postprocess() takes 4 positional arguments but 5 were given
+ERROR 2022-02-17 11:39:28,515 [dag.py:410] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [rec|0] Failed to postprocess: postprocess() takes 4 positional arguments but 5 were given
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 11:40:11,401 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 2, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 11:40:11,401 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 11:40:11,401 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 11:40:11,401 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 11:40:11,402 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":2,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 11:40:11,402 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 11:40:11,402 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 11:40:11,414 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 11:40:11,414 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 11:40:11,415 [dag.py:662] rec
+INFO 2022-02-17 11:40:11,415 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 11:40:11,415 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 11:40:11,415 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 11:40:11,415 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 11:40:11,415 [dag.py:686] - rec
+INFO 2022-02-17 11:40:11,415 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 11:40:11,415 [dag.py:684] [rec]
+INFO 2022-02-17 11:40:11,415 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 11:40:11,430 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 11:40:11,439 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 11:40:11,440 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 11:40:11,468 [dag.py:832] [DAG] start
+INFO 2022-02-17 11:40:11,469 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 11:40:11,471 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 11:40:11,479 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 11:40:11,480 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 11:40:11,480 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 11:40:12,540 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 11:40:14,355 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 11:40:26,642 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645069226.6426268
+INFO 2022-02-17 11:40:26,643 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645069226.643837
+INFO 2022-02-17 11:40:26,644 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 11:40:29,377 [dag.py:405] (data_id=0 log_id=0) Succ predict
+INFO 2022-02-17 11:41:41,337 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645069301.3372242
+INFO 2022-02-17 11:41:41,337 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645069301.3376215
+INFO 2022-02-17 11:41:41,337 [dag.py:369] (data_id=1 log_id=0) Succ Generate ID
+INFO 2022-02-17 11:41:41,351 [dag.py:405] (data_id=1 log_id=0) Succ predict
+WARNING 2022-02-17 11:43:51,478 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 11:43:51,478 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:43:51,478 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 11:43:51,578 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 2, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 11:43:51,579 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 11:43:51,579 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 11:43:51,579 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 11:43:51,579 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":2,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 11:43:51,579 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 11:43:51,579 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 11:43:51,591 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 11:43:51,592 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 11:43:51,592 [dag.py:662] rec
+INFO 2022-02-17 11:43:51,592 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 11:43:51,592 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 11:43:51,592 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 11:43:51,592 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 11:43:51,593 [dag.py:686] - rec
+INFO 2022-02-17 11:43:51,593 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 11:43:51,593 [dag.py:684] [rec]
+INFO 2022-02-17 11:43:51,593 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 11:43:51,608 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 11:43:51,617 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 11:43:51,618 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 11:43:51,622 [dag.py:832] [DAG] start
+INFO 2022-02-17 11:43:51,623 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 11:43:51,625 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 11:43:51,632 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 11:43:51,632 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 11:43:51,632 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 11:43:52,682 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 11:43:54,499 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 11:44:01,419 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645069441.419735
+INFO 2022-02-17 11:44:01,421 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645069441.4209738
+INFO 2022-02-17 11:44:01,421 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 11:44:04,142 [dag.py:405] (data_id=0 log_id=0) Succ predict
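Each `inference request name:recognition` / `Succ predict` pair in the log corresponds to one client round trip. A minimal client sketch, assuming the standard key/value JSON protocol of Paddle Serving pipeline web services on the `http_port` (9315) shown above, and a hypothetical local image `test.jpg`:

```python
import base64
import json

import requests  # third-party; pip install requests

# The server above listens on http_port 9315 and exposes the
# "recognition" service, so HTTP requests go to /recognition/prediction.
url = "http://127.0.0.1:9315/recognition/prediction"

with open("test.jpg", "rb") as f:  # hypothetical test image
    image_b64 = base64.b64encode(f.read()).decode("utf8")

# Pipeline web services take parallel "key"/"value" lists; the key name
# depends on the preprocessing op, and "image" is assumed here.
payload = {"key": ["image"], "value": [image_b64]}

response = requests.post(url, data=json.dumps(payload))
print(response.json())  # err_no/err_msg plus the fetched "feature"
```

Alternatively, the `rpc_port` (9314) can be used with `PipelineClient` from `paddle_serving_server.pipeline`, which is how the bundled test clients typically connect.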
+WARNING 2022-02-17 13:33:50,873 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 13:33:50,873 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 13:33:50,873 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 13:33:50,873 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 13:33:50,873 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 13:33:50,874 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 13:33:50,875 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 13:33:50,875 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 13:33:50,887 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 2, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 13:33:50,887 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 13:33:50,887 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 13:33:50,887 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 13:33:50,887 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":2,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 13:33:50,887 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 13:33:50,887 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 13:33:50,901 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 13:33:50,902 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 13:33:50,902 [dag.py:662] rec
+INFO 2022-02-17 13:33:50,902 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 13:33:50,902 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 13:33:50,902 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 13:33:50,902 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 13:33:50,902 [dag.py:686] - rec
+INFO 2022-02-17 13:33:50,902 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 13:33:50,902 [dag.py:684] [rec]
+INFO 2022-02-17 13:33:50,902 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 13:33:50,918 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 13:33:50,977 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 13:33:50,977 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 13:33:50,982 [dag.py:832] [DAG] start
+INFO 2022-02-17 13:33:50,983 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 13:33:50,985 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 13:33:50,991 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 13:33:50,992 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 13:33:50,992 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 13:33:52,067 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 13:33:53,924 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 13:34:04,445 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645076044.4450066
+INFO 2022-02-17 13:34:04,446 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645076044.4461415
+INFO 2022-02-17 13:34:04,446 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 13:34:07,203 [dag.py:405] (data_id=0 log_id=0) Succ predict
+WARNING 2022-02-17 13:38:14,874 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 13:38:14,874 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 13:38:14,874 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 13:38:14,874 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 13:38:14,874 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 13:38:14,874 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 13:38:14,874 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 13:38:14,875 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 13:38:14,876 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 13:38:14,971 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 2, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 13:38:14,971 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 13:38:14,971 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 13:38:14,971 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 13:38:14,971 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":2,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 13:38:14,971 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 13:38:14,972 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 13:38:14,985 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 13:38:14,986 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 13:38:14,986 [dag.py:662] rec
+INFO 2022-02-17 13:38:14,986 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 13:38:14,986 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 13:38:14,986 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 13:38:14,986 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 13:38:14,986 [dag.py:686] - rec
+INFO 2022-02-17 13:38:14,986 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 13:38:14,986 [dag.py:684] [rec]
+INFO 2022-02-17 13:38:14,986 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 13:38:15,002 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 13:38:15,011 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 13:38:15,012 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 13:38:15,016 [dag.py:832] [DAG] start
+INFO 2022-02-17 13:38:15,017 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 13:38:15,019 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 13:38:15,026 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 13:38:15,026 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 13:38:15,026 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 13:38:16,054 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 13:38:17,886 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 13:38:25,102 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645076305.1018336
+INFO 2022-02-17 13:38:25,103 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645076305.1030562
+INFO 2022-02-17 13:38:25,103 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 13:38:27,823 [dag.py:405] (data_id=0 log_id=0) Succ predict
+WARNING 2022-02-17 13:41:58,835 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 13:41:58,836 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 13:41:58,837 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 13:41:58,878 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 2, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 13:41:58,879 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 13:41:58,879 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 13:41:58,879 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 13:41:58,879 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":2,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 13:41:58,879 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 13:41:58,879 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 13:41:58,891 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 13:41:58,892 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 13:41:58,892 [dag.py:662] rec
+INFO 2022-02-17 13:41:58,892 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 13:41:58,892 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 13:41:58,892 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 13:41:58,892 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 13:41:58,892 [dag.py:686] - rec
+INFO 2022-02-17 13:41:58,892 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 13:41:58,892 [dag.py:684] [rec]
+INFO 2022-02-17 13:41:58,892 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 13:41:58,908 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 13:41:58,918 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 13:41:58,918 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 13:41:58,922 [dag.py:832] [DAG] start
+INFO 2022-02-17 13:41:58,923 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 13:41:58,925 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 13:41:58,931 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 13:41:58,932 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 13:41:58,932 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 13:41:59,935 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 13:42:02,434 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 13:42:28,077 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645076548.076952
+INFO 2022-02-17 13:42:28,078 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645076548.078083
+INFO 2022-02-17 13:42:28,078 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 13:42:30,814 [dag.py:405] (data_id=0 log_id=0) Succ predict
+WARNING 2022-02-17 13:45:45,876 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 13:45:45,876 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 13:45:45,876 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 13:45:45,876 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 13:45:45,876 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 13:45:45,876 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 13:45:45,876 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 13:45:45,876 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 13:45:45,876 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 13:45:45,876 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 13:45:45,877 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 13:45:45,877 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 13:45:45,877 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 13:45:45,877 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 13:45:45,877 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 13:45:45,877 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 13:45:45,877 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 13:45:45,877 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 13:45:45,877 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 13:45:45,877 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 13:45:45,888 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 2, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 13:45:45,888 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 13:45:45,888 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 13:45:45,888 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 13:45:45,888 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":2,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 13:45:45,889 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 13:45:45,889 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 13:45:45,900 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 13:45:45,901 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 13:45:45,901 [dag.py:662] rec
+INFO 2022-02-17 13:45:45,901 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 13:45:45,901 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 13:45:45,901 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 13:45:45,901 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 13:45:45,901 [dag.py:686] - rec
+INFO 2022-02-17 13:45:45,901 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 13:45:45,901 [dag.py:684] [rec]
+INFO 2022-02-17 13:45:45,901 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 13:45:45,916 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 13:45:45,926 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 13:45:45,926 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 13:45:45,930 [dag.py:832] [DAG] start
+INFO 2022-02-17 13:45:45,931 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 13:45:45,933 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 13:45:45,939 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 13:45:45,940 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 13:45:45,940 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 13:45:46,919 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 13:45:49,242 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645076749.2424037
+INFO 2022-02-17 13:45:49,243 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645076749.2436109
+INFO 2022-02-17 13:45:49,244 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 13:45:49,645 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 13:45:52,450 [dag.py:405] (data_id=0 log_id=0) Succ predict
+WARNING 2022-02-17 13:48:08,096 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 13:48:08,168 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 13:48:08,168 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 13:48:08,168 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 13:48:08,168 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 13:48:08,168 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 13:48:08,169 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 13:48:08,170 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 13:48:08,170 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 13:48:08,181 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 2, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 13:48:08,182 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 13:48:08,182 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 13:48:08,182 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 13:48:08,182 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":2,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 13:48:08,182 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 13:48:08,182 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 13:48:08,195 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 13:48:08,196 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 13:48:08,196 [dag.py:662] rec
+INFO 2022-02-17 13:48:08,196 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 13:48:08,196 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 13:48:08,196 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 13:48:08,196 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 13:48:08,196 [dag.py:686] - rec
+INFO 2022-02-17 13:48:08,196 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 13:48:08,196 [dag.py:684] [rec]
+INFO 2022-02-17 13:48:08,196 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 13:48:08,213 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 13:48:08,222 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 13:48:08,223 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 13:48:08,227 [dag.py:832] [DAG] start
+INFO 2022-02-17 13:48:08,228 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 13:48:08,230 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 13:48:08,275 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 13:48:08,276 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 13:48:08,276 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 13:48:09,338 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 13:48:11,286 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645076891.2866788
+INFO 2022-02-17 13:48:11,287 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645076891.2878144
+INFO 2022-02-17 13:48:11,288 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 13:48:12,468 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 13:48:15,399 [dag.py:405] (data_id=0 log_id=0) Succ predict
+WARNING 2022-02-17 14:00:34,470 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 14:00:34,470 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:00:34,470 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 14:00:34,470 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 14:00:34,470 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 14:00:34,470 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 14:00:34,470 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 14:00:34,471 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 14:00:34,472 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 14:00:34,483 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 2, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 14:00:34,483 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 14:00:34,483 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 14:00:34,483 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 14:00:34,483 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":2,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 14:00:34,483 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 14:00:34,483 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 14:00:34,495 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 14:00:34,496 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 14:00:34,496 [dag.py:662] rec
+INFO 2022-02-17 14:00:34,496 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 14:00:34,496 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 14:00:34,496 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 14:00:34,496 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 14:00:34,497 [dag.py:686] - rec
+INFO 2022-02-17 14:00:34,497 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 14:00:34,497 [dag.py:684] [rec]
+INFO 2022-02-17 14:00:34,497 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 14:00:34,512 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 14:00:34,522 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 14:00:34,522 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 14:00:34,526 [dag.py:832] [DAG] start
+INFO 2022-02-17 14:00:34,527 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 14:00:34,529 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 14:00:34,536 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 14:00:34,536 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 14:00:34,536 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 14:00:35,542 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 14:00:38,036 [operator.py:1317] [rec|0] Succ init
+WARNING 2022-02-17 14:00:53,138 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 14:00:53,139 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 14:00:53,140 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 14:00:53,140 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 14:00:53,140 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 14:00:53,140 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 14:00:56,641 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645077656.6412687
+INFO 2022-02-17 14:00:56,642 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645077656.642419
+INFO 2022-02-17 14:00:56,642 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 14:00:59,372 [dag.py:405] (data_id=0 log_id=0) Succ predict
+WARNING 2022-02-17 14:09:35,769 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 14:09:35,770 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:09:35,770 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 14:09:35,770 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 14:09:35,770 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 14:09:35,770 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 14:09:35,770 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 14:09:35,770 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 14:09:35,770 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 14:09:35,770 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:09:35,771 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 14:09:35,771 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 14:09:35,771 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 14:09:35,771 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 14:09:35,771 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 14:09:35,771 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 14:09:35,771 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 14:09:35,771 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 14:09:35,771 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 14:09:35,771 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 14:09:35,783 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 2, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 14:09:35,783 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 14:09:35,783 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 14:09:35,783 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 14:09:35,783 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":2,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 14:09:35,783 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 14:09:35,783 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 14:09:35,797 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 14:09:35,798 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 14:09:35,798 [dag.py:662] rec
+INFO 2022-02-17 14:09:35,798 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 14:09:35,798 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 14:09:35,798 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 14:09:35,798 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 14:09:35,798 [dag.py:686] - rec
+INFO 2022-02-17 14:09:35,798 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 14:09:35,799 [dag.py:684] [rec]
+INFO 2022-02-17 14:09:35,799 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 14:09:35,816 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 14:09:35,826 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 14:09:35,827 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 14:09:35,832 [dag.py:832] [DAG] start
+INFO 2022-02-17 14:09:35,833 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 14:09:35,870 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 14:09:35,876 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 14:09:35,877 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 14:09:35,877 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 14:09:36,950 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 14:09:39,525 [operator.py:1317] [rec|0] Succ init
+WARNING 2022-02-17 14:10:28,771 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 14:10:28,771 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:10:28,771 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 14:10:28,771 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 14:10:28,771 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 14:10:28,771 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 14:10:28,771 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 14:10:28,772 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 14:10:28,773 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 14:10:28,784 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 1, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 14:10:28,784 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 14:10:28,784 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 14:10:28,784 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 14:10:28,784 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":1,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 14:10:28,784 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 14:10:28,784 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 14:10:28,796 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 14:10:28,797 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 14:10:28,797 [dag.py:662] rec
+INFO 2022-02-17 14:10:28,797 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 14:10:28,797 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 14:10:28,797 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 14:10:28,797 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 14:10:28,797 [dag.py:686] - rec
+INFO 2022-02-17 14:10:28,798 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 14:10:28,798 [dag.py:684] [rec]
+INFO 2022-02-17 14:10:28,798 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 14:10:28,813 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 14:10:28,822 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 14:10:28,823 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 14:10:28,827 [dag.py:832] [DAG] start
+INFO 2022-02-17 14:10:28,827 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 14:10:28,870 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 14:10:28,873 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 14:10:28,873 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 14:10:28,873 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 14:10:29,826 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:False, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 14:10:32,335 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 14:10:33,070 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645078233.070603
+INFO 2022-02-17 14:10:33,071 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645078233.0718045
+INFO 2022-02-17 14:10:33,072 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 14:10:35,794 [dag.py:405] (data_id=0 log_id=0) Succ predict
+WARNING 2022-02-17 14:14:29,769 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 14:14:29,770 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:14:29,770 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 14:14:29,770 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 14:14:29,770 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 14:14:29,770 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 14:14:29,770 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 14:14:29,770 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 14:14:29,770 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 14:14:29,770 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:14:29,771 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 14:14:29,771 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 14:14:29,771 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 14:14:29,771 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 14:14:29,771 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 14:14:29,771 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 14:14:29,771 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 14:14:29,771 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 14:14:29,771 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 14:14:29,771 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 14:14:29,783 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 1, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 14:14:29,783 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 14:14:29,783 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 14:14:29,783 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 14:14:29,783 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":1,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 14:14:29,783 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 14:14:29,783 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 14:14:29,795 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 14:14:29,796 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 14:14:29,796 [dag.py:662] rec
+INFO 2022-02-17 14:14:29,796 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 14:14:29,796 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 14:14:29,796 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 14:14:29,796 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 14:14:29,796 [dag.py:686] - rec
+INFO 2022-02-17 14:14:29,796 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 14:14:29,796 [dag.py:684] [rec]
+INFO 2022-02-17 14:14:29,796 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 14:14:29,812 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 14:14:29,822 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 14:14:29,823 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 14:14:29,827 [dag.py:832] [DAG] start
+INFO 2022-02-17 14:14:29,827 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 14:14:29,829 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 14:14:29,837 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 14:14:29,837 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 14:14:29,837 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 14:14:30,832 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:False, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 14:14:33,327 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 14:14:39,181 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645078479.1810365
+INFO 2022-02-17 14:14:39,182 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645078479.1821644
+INFO 2022-02-17 14:14:39,182 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 14:14:41,917 [dag.py:405] (data_id=0 log_id=0) Succ predict
+INFO 2022-02-17 14:26:17,673 [loader.py:54] Loading faiss with AVX2 support.
+INFO 2022-02-17 14:26:17,673 [loader.py:58] Could not load library with AVX2 support due to:
+ModuleNotFoundError("No module named 'faiss.swigfaiss_avx2'")
+INFO 2022-02-17 14:26:17,673 [loader.py:64] Loading faiss.
+INFO 2022-02-17 14:26:17,693 [loader.py:66] Successfully loaded faiss.
+WARNING 2022-02-17 14:26:17,697 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 14:26:17,697 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 14:26:17,698 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-17 14:26:17,711 [operator.py:181] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 1, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-17 14:26:17,711 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 14:26:17,712 [operator.py:285] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-17 14:26:17,712 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-17 14:26:17,712 [pipeline_server.py:218]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ },
+ "channel_recv_frist_arrive":false
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":1,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-17 14:26:17,712 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-17 14:26:17,712 [operator.py:308] Op(rec) use local rpc service at port: []
+INFO 2022-02-17 14:26:17,726 [dag.py:496] [DAG] Succ init
+INFO 2022-02-17 14:26:17,727 [dag.py:659] ================= USED OP =================
+INFO 2022-02-17 14:26:17,727 [dag.py:662] rec
+INFO 2022-02-17 14:26:17,727 [dag.py:663] -------------------------------------------
+INFO 2022-02-17 14:26:17,727 [dag.py:680] ================== DAG ====================
+INFO 2022-02-17 14:26:17,727 [dag.py:682] (VIEW 0)
+INFO 2022-02-17 14:26:17,727 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-17 14:26:17,727 [dag.py:686] - rec
+INFO 2022-02-17 14:26:17,727 [dag.py:682] (VIEW 1)
+INFO 2022-02-17 14:26:17,727 [dag.py:684] [rec]
+INFO 2022-02-17 14:26:17,727 [dag.py:687] -------------------------------------------
+INFO 2022-02-17 14:26:17,743 [dag.py:730] op:rec add input channel.
+INFO 2022-02-17 14:26:17,757 [dag.py:759] last op:rec add output channel
+INFO 2022-02-17 14:26:17,758 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-17 14:26:17,764 [dag.py:832] [DAG] start
+INFO 2022-02-17 14:26:17,765 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-17 14:26:17,767 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-17 14:26:17,777 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-17 14:26:17,777 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-17 14:26:17,777 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-17 14:26:18,830 [local_predict.py:153] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:False, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-17 14:26:20,559 [operator.py:1317] [rec|0] Succ init
+INFO 2022-02-17 14:26:37,796 [pipeline_server.py:56] (log_id=0) inference request name:recognition self.name:recognition time:1645079197.7964764
+INFO 2022-02-17 14:26:37,797 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction, time:1645079197.7977042
+INFO 2022-02-17 14:26:37,798 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-17 14:26:39,784 [dag.py:405] (data_id=0 log_id=0) Succ predict
+INFO 2022-02-22 13:20:19,039 [loader.py:54] Loading faiss with AVX2 support.
+INFO 2022-02-22 13:20:19,040 [loader.py:58] Could not load library with AVX2 support due to:
+ModuleNotFoundError("No module named 'faiss.swigfaiss_avx2'")
+INFO 2022-02-22 13:20:19,040 [loader.py:64] Loading faiss.
+INFO 2022-02-22 13:20:19,062 [loader.py:66] Successfully loaded faiss.
+WARNING 2022-02-22 13:20:19,068 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-22 13:20:19,068 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] tracer not set, use default: {}
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-22 13:20:19,070 [operator.py:163] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 1, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['feature'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-22 13:20:19,070 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['feature'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:20:19,070 [operator.py:267] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['feature']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-22 13:20:19,070 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2022-02-22 13:20:19,070 [pipeline_server.py:207]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ }
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":1,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "feature"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-22 13:20:19,070 [pipeline_server.py:212] -------------------------------------------
+INFO 2022-02-22 13:20:19,070 [operator.py:290] Op(rec) use local rpc service at port: []
+INFO 2022-02-22 13:20:19,085 [dag.py:493] [DAG] Succ init
+INFO 2022-02-22 13:20:19,086 [dag.py:651] ================= USED OP =================
+INFO 2022-02-22 13:20:19,086 [dag.py:654] rec
+INFO 2022-02-22 13:20:19,086 [dag.py:655] -------------------------------------------
+INFO 2022-02-22 13:20:19,115 [dag.py:784] [DAG] Succ build DAG
+INFO 2022-02-22 13:20:19,122 [dag.py:816] [DAG] start
+INFO 2022-02-22 13:20:19,123 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-22 13:20:19,125 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2022-02-22 13:20:19,133 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:20:19,133 [operator.py:1162] Init cuda env in process 0
+INFO 2022-02-22 13:20:19,133 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-22 13:20:20,151 [local_predict.py:115] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:False, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-22 13:20:21,523 [operator.py:1173] [rec|0] Succ init
+INFO 2022-02-22 13:20:54,929 [pipeline_server.py:51] (log_id=0) inference request name:recognition self.name:recognition
+INFO 2022-02-22 13:20:54,931 [operator.py:1421] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction
+INFO 2022-02-22 13:20:54,931 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2022-02-22 13:20:54,941 [operator.py:838] (data_id=0 log_id=0) [rec|0] Failed to process(batch: [0]): Fetch names should not be empty or out of saved fetch list. log_id:0
+Traceback (most recent call last):
+ File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 827, in _run_process
+ midped_batch = self.process(feed_batch, typical_logid)
+ File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 480, in process
+ log_id=typical_logid)
+ File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle_serving_app/local_predict.py", line 241, in predict
+ log_id:{}".format(log_id))
+ValueError: Fetch names should not be empty or out of saved fetch list. log_id:0
+ERROR 2022-02-22 13:20:54,942 [operator.py:883] (log_id=0) rec failed to predict.
+ERROR 2022-02-22 13:20:54,944 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (log_id=0) rec failed to predict.
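
The two errors above record a fetch-name mismatch: the config asks the `rec` Op for `feature`, but that alias is not among the fetch variables saved in `serving_server_conf.prototxt` for this converted model (the next launch, at 13:22, switches the config to the raw tensor name `save_infer_model/scale_0.tmp_1`). One way to check which fetch names a converted model actually exposes is to load it with `LocalPredictor` and inspect the parsed config; a minimal sketch, assuming (not confirmed by this log) that the `paddle_serving_app` version in use keeps the parsed aliases in `fetch_names_` after `load_model_config`:

```python
# Minimal sketch: list the fetch names saved with the converted model.
# Assumes LocalPredictor stores the parsed feed/fetch aliases in
# feed_names_/fetch_names_ after load_model_config (an assumption,
# not shown in this log).
from paddle_serving_app.local_predict import LocalPredictor

predictor = LocalPredictor()
predictor.load_model_config("./general_PPLCNet_x2_5_lite_v1.0_serving",
                            use_gpu=True, gpu_id=0)
print(predictor.fetch_names_)  # e.g. ['save_infer_model/scale_0.tmp_1']
```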
+INFO 2022-02-22 13:22:05,939 [loader.py:54] Loading faiss with AVX2 support.
+INFO 2022-02-22 13:22:05,939 [loader.py:58] Could not load library with AVX2 support due to:
+ModuleNotFoundError("No module named 'faiss.swigfaiss_avx2'")
+INFO 2022-02-22 13:22:05,939 [loader.py:64] Loading faiss.
+INFO 2022-02-22 13:22:05,959 [loader.py:66] Successfully loaded faiss.
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] tracer not set, use default: {}
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-22 13:22:05,965 [operator.py:163] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 1, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-22 13:22:05,965 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:22:05,965 [operator.py:267] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-22 13:22:05,965 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2022-02-22 13:22:05,966 [pipeline_server.py:207]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ }
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":1,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-22 13:22:05,966 [pipeline_server.py:212] -------------------------------------------
+INFO 2022-02-22 13:22:05,966 [operator.py:290] Op(rec) use local rpc service at port: []
+INFO 2022-02-22 13:22:05,980 [dag.py:493] [DAG] Succ init
+INFO 2022-02-22 13:22:05,981 [dag.py:651] ================= USED OP =================
+INFO 2022-02-22 13:22:05,981 [dag.py:654] rec
+INFO 2022-02-22 13:22:05,982 [dag.py:655] -------------------------------------------
+INFO 2022-02-22 13:22:06,011 [dag.py:784] [DAG] Succ build DAG
+INFO 2022-02-22 13:22:06,017 [dag.py:816] [DAG] start
+INFO 2022-02-22 13:22:06,018 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-22 13:22:06,020 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2022-02-22 13:22:06,028 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:22:06,028 [operator.py:1162] Init cuda env in process 0
+INFO 2022-02-22 13:22:06,028 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-22 13:22:07,054 [local_predict.py:115] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:False, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-22 13:22:08,418 [operator.py:1173] [rec|0] Succ init
+INFO 2022-02-22 13:22:12,633 [pipeline_server.py:51] (log_id=0) inference request name:recognition self.name:recognition
+INFO 2022-02-22 13:22:12,634 [operator.py:1421] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction
+INFO 2022-02-22 13:22:12,635 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2022-02-22 13:22:14,591 [operator.py:968] (data_id=0 log_id=0) [rec|0] Failed to postprocess: 'feature'
+Traceback (most recent call last):
+ File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 964, in _run_postprocess
+ logid_dict.get(data_id))
+ File "recognition_web_service_onlyrec.py", line 189, in postprocess
+ batch_features = fetch_dict["feature"]
+KeyError: 'feature'
+ERROR 2022-02-22 13:22:14,594 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [rec|0] Failed to postprocess: 'feature'
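
This KeyError is the follow-on to the config change: the predictor now fetches `save_infer_model/scale_0.tmp_1`, but `postprocess` in `recognition_web_service_onlyrec.py` (line 189 in the traceback) still indexes `fetch_dict["feature"]`. A minimal sketch of the corrected lookup, assuming the three-tuple `(fetch_dict, product_errcode, product_errinfo)` return convention used by pipeline Op examples of this Serving version:

```python
# Hypothetical fix for recognition_web_service_onlyrec.py: index fetch_dict
# by the tensor name the model actually returns, not the old alias.
def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
    batch_features = fetch_dict["save_infer_model/scale_0.tmp_1"]
    # (result_dict, product_errcode, product_errinfo) -- assumed convention
    return {"features": batch_features.tolist()}, None, ""
```

The next launch, at 13:23, ends in `Succ predict`, consistent with a fix along these lines.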
+INFO 2022-02-22 13:23:16,027 [loader.py:54] Loading faiss with AVX2 support.
+INFO 2022-02-22 13:23:16,027 [loader.py:58] Could not load library with AVX2 support due to:
+ModuleNotFoundError("No module named 'faiss.swigfaiss_avx2'")
+INFO 2022-02-22 13:23:16,027 [loader.py:64] Loading faiss.
+INFO 2022-02-22 13:23:16,047 [loader.py:66] Successfully loaded faiss.
+WARNING 2022-02-22 13:23:16,051 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] tracer not set, use default: {}
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-22 13:23:16,052 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-22 13:23:16,053 [operator.py:163] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 1, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-22 13:23:16,053 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:23:16,053 [operator.py:267] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-22 13:23:16,053 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2022-02-22 13:23:16,053 [pipeline_server.py:207]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ }
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":1,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-22 13:23:16,053 [pipeline_server.py:212] -------------------------------------------
+INFO 2022-02-22 13:23:16,053 [operator.py:290] Op(rec) use local rpc service at port: []
+INFO 2022-02-22 13:23:16,067 [dag.py:493] [DAG] Succ init
+INFO 2022-02-22 13:23:16,068 [dag.py:651] ================= USED OP =================
+INFO 2022-02-22 13:23:16,068 [dag.py:654] rec
+INFO 2022-02-22 13:23:16,068 [dag.py:655] -------------------------------------------
+INFO 2022-02-22 13:23:16,095 [dag.py:784] [DAG] Succ build DAG
+INFO 2022-02-22 13:23:16,101 [dag.py:816] [DAG] start
+INFO 2022-02-22 13:23:16,102 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-22 13:23:16,104 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2022-02-22 13:23:16,112 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:23:16,112 [operator.py:1162] Init cuda env in process 0
+INFO 2022-02-22 13:23:16,112 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-22 13:23:17,164 [local_predict.py:115] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:False, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-22 13:23:18,559 [operator.py:1173] [rec|0] Succ init
+INFO 2022-02-22 13:23:19,657 [pipeline_server.py:51] (log_id=0) inference request name:recognition self.name:recognition
+INFO 2022-02-22 13:23:19,658 [operator.py:1421] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction
+INFO 2022-02-22 13:23:19,658 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-22 13:23:21,658 [dag.py:404] (data_id=0 log_id=0) Succ predict
+INFO 2022-02-22 13:28:11,321 [loader.py:54] Loading faiss with AVX2 support.
+INFO 2022-02-22 13:28:11,321 [loader.py:58] Could not load library with AVX2 support due to:
+ModuleNotFoundError("No module named 'faiss.swigfaiss_avx2'")
+INFO 2022-02-22 13:28:11,321 [loader.py:64] Loading faiss.
+INFO 2022-02-22 13:28:11,340 [loader.py:66] Successfully loaded faiss.
+WARNING 2022-02-22 13:28:11,345 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-22 13:28:11,345 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:28:11,345 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-22 13:28:11,345 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-22 13:28:11,345 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-22 13:28:11,345 [pipeline_server.py:496] [CONF] tracer not set, use default: {}
+WARNING 2022-02-22 13:28:11,345 [pipeline_server.py:496] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-22 13:28:11,345 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-22 13:28:11,345 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:28:11,346 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-22 13:28:11,346 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-22 13:28:11,346 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-22 13:28:11,346 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-22 13:28:11,346 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-22 13:28:11,346 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-22 13:28:11,346 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-22 13:28:11,346 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-22 13:28:11,346 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-22 13:28:11,346 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-22 13:28:11,346 [operator.py:163] local_service_conf: {'model_config': './general_PPLCNet_x2_5_lite_v1.0_serving', 'device_type': 1, 'devices': '0', 'client_type': 'local_predictor', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-22 13:28:11,346 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:28:11,346 [operator.py:267] rec
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: ./general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-22 13:28:11,346 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2022-02-22 13:28:11,347 [pipeline_server.py:207]
+{
+ "worker_num":1,
+ "http_port":9315,
+ "rpc_port":9314,
+ "dag":{
+ "is_thread_op":false,
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "tracer":{
+ "interval_s":-1
+ }
+ },
+ "op":{
+ "rec":{
+ "concurrency":1,
+ "local_service_conf":{
+ "model_config":"./general_PPLCNet_x2_5_lite_v1.0_serving",
+ "device_type":1,
+ "devices":"0",
+ "client_type":"local_predictor",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "build_dag_each_worker":false
+}
+INFO 2022-02-22 13:28:11,347 [pipeline_server.py:212] -------------------------------------------
+INFO 2022-02-22 13:28:11,347 [operator.py:290] Op(rec) use local rpc service at port: []
+INFO 2022-02-22 13:28:11,361 [dag.py:493] [DAG] Succ init
+INFO 2022-02-22 13:28:11,362 [dag.py:651] ================= USED OP =================
+INFO 2022-02-22 13:28:11,362 [dag.py:654] rec
+INFO 2022-02-22 13:28:11,362 [dag.py:655] -------------------------------------------
+INFO 2022-02-22 13:28:11,390 [dag.py:784] [DAG] Succ build DAG
+INFO 2022-02-22 13:28:11,396 [dag.py:816] [DAG] start
+INFO 2022-02-22 13:28:11,397 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-22 13:28:11,399 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2022-02-22 13:28:11,407 [local_service_handler.py:172] Models(./general_PPLCNet_x2_5_lite_v1.0_serving) will be launched by device gpu. use_gpu:True, use_trt:False, use_lite:False, use_xpu:False, device_type:1, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:28:11,407 [operator.py:1162] Init cuda env in process 0
+INFO 2022-02-22 13:28:11,407 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-22 13:28:12,432 [local_predict.py:115] LocalPredictor load_model_config params: model_path:./general_PPLCNet_x2_5_lite_v1.0_serving, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:False, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-22 13:28:13,791 [operator.py:1173] [rec|0] Succ init
+INFO 2022-02-22 13:28:17,425 [pipeline_server.py:51] (log_id=0) inference request name:recognition self.name:recognition
+INFO 2022-02-22 13:28:17,426 [operator.py:1421] RequestOp unpack one request. log_id:0, clientip: name:recognition, method:prediction
+INFO 2022-02-22 13:28:17,427 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-22 13:28:19,398 [dag.py:404] (data_id=0 log_id=0) Succ predict
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/PipelineServingLogs/pipeline.log.wf b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/PipelineServingLogs/pipeline.log.wf
new file mode 100644
index 000000000..157512243
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/PipelineServingLogs/pipeline.log.wf
@@ -0,0 +1,561 @@
+WARNING 2022-02-17 11:39:15,975 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 11:39:15,976 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 11:39:15,977 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2022-02-17 11:39:28,512 [operator.py:1109] (data_id=0 log_id=0) [rec|0] Failed to postprocess: postprocess() takes 4 positional arguments but 5 were given
+Traceback (most recent call last):
+ File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1105, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: postprocess() takes 4 positional arguments but 5 were given
+ERROR 2022-02-17 11:39:28,515 [dag.py:410] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [rec|0] Failed to postprocess: postprocess() takes 4 positional arguments but 5 were given
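
This earlier TypeError is the signature-mismatch variant from the same debugging session: `operator.py` in this Serving version calls `postprocess(input_dicts, fetch_dict, data_id, log_id)` (five arguments counting `self`), so an Op written against the older four-argument examples fails exactly this way. A minimal sketch of an Op accepting the newer signature, with a hypothetical class name:

```python
from paddle_serving_server.web_service import Op

class RecOp(Op):  # hypothetical name; the log only identifies the op as "rec"
    def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
        # Accepting data_id matches the five-argument call in operator.py and
        # avoids "postprocess() takes 4 positional arguments but 5 were given".
        return fetch_dict, None, ""
```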
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 11:40:11,389 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 11:40:11,390 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+WARNING 2022-02-17 11:43:51,478 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-17 11:43:51,478 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:43:51,478 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] tracer not set, use default: {}
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-17 11:43:51,479 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+WARNING 2022-02-22 13:20:19,068 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-22 13:20:19,068 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] tracer not set, use default: {}
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-22 13:20:19,069 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2022-02-22 13:20:54,941 [operator.py:838] (data_id=0 log_id=0) [rec|0] Failed to process(batch: [0]): Fetch names should not be empty or out of saved fetch list. log_id:0
+Traceback (most recent call last):
+ File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 827, in _run_process
+ midped_batch = self.process(feed_batch, typical_logid)
+ File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 480, in process
+ log_id=typical_logid)
+ File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle_serving_app/local_predict.py", line 241, in predict
+ log_id:{}".format(log_id))
+ValueError: Fetch names should not be empty or out of saved fetch list. log_id:0
+ERROR 2022-02-22 13:20:54,942 [operator.py:883] (log_id=0) rec failed to predict.
+ERROR 2022-02-22 13:20:54,944 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (log_id=0) rec failed to predict.
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] tracer not set, use default: {}
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] interval_s not set, use default: -1
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:22:05,964 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-22 13:22:05,965 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2022-02-22 13:22:14,591 [operator.py:968] (data_id=0 log_id=0) [rec|0] Failed to postprocess: 'feature'
+Traceback (most recent call last):
+ File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 964, in _run_postprocess
+ logid_dict.get(data_id))
+ File "recognition_web_service_onlyrec.py", line 189, in postprocess
+ batch_features = fetch_dict["feature"]
+KeyError: 'feature'
+ERROR 2022-02-22 13:22:14,594 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [rec|0] Failed to postprocess: 'feature'
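Both tracebacks above point at the same contract: the names in `fetch_list` (see `config_onlyrec.yml` below), the `fetch_var` declarations in `serving_server_conf.prototxt` (real `name` vs `alias_name`), and the key read in `postprocess()` must agree. The first failure ("Fetch names should not be empty or out of saved fetch list") means the requested fetch name was not in the saved model's fetch list; the second (`KeyError: 'feature'`) means `postprocess` read a key the predictor did not return under that name. A small diagnostic sketch (illustrative, not part of the diff; it assumes the prototxt layout shown further down) that lists the declared name/alias pairs:

```python
# List each fetch_var's real name and alias_name from a serving_server_conf.prototxt
# so fetch_list entries and postprocess keys can be checked against both columns.
import re

def fetch_names(prototxt_path):
    text = open(prototxt_path).read()
    pairs = []
    for block in re.findall(r"fetch_var\s*\{(.*?)\}", text, re.S):
        name = re.search(r'\bname:\s*"([^"]+)"', block).group(1)
        alias = re.search(r'alias_name:\s*"([^"]+)"', block).group(1)
        pairs.append((name, alias))
    return pairs

# Path as laid out in this diff:
print(fetch_names(
    "general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt"))
# -> [('save_infer_model/scale_0.tmp_1', 'feature')]
```

Checking a candidate fetch name against both columns catches either mismatch before restarting the server.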
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/PipelineServingLogs/pipeline.tracer b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/PipelineServingLogs/pipeline.tracer
new file mode 100644
index 000000000..e69de29bb
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/ProcessInfo.json b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/ProcessInfo.json
new file mode 100644
index 000000000..4e88c39e5
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/ProcessInfo.json
@@ -0,0 +1 @@
+[{"pid": 827, "port": [9314, 9315], "model": "pipline", "start_time": 1645079177.7113242}]
\ No newline at end of file
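`ProcessInfo.json` is runtime metadata written by the serving launcher: the process id, the rpc/http ports (9314/9315, matching `config_onlyrec.yml` below), and a Unix start timestamp. The `"pipline"` spelling is how the generator wrote the field and is kept verbatim. A quick, illustrative way to read it:

```python
# Read the launcher's process metadata (field names as committed above).
import json
import time

with open("ProcessInfo.json") as f:
    (proc,) = json.load(f)  # the file holds a one-element list
print("pid:", proc["pid"], "ports:", proc["port"])
print("started:", time.ctime(proc["start_time"]))
```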
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/config_onlyrec.yml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/config_onlyrec.yml
new file mode 100644
index 000000000..bd9cada9b
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/config_onlyrec.yml
@@ -0,0 +1,33 @@
+#worker_num: maximum concurrency. When build_dag_each_worker=True, the framework creates worker_num processes, each building its own gRPC server and DAG
+##When build_dag_each_worker=False, the framework sets max_workers=worker_num on the main thread's gRPC thread pool
+worker_num: 1
+
+#HTTP port; rpc_port and http_port must not both be empty. When rpc_port is available and http_port is empty, no http_port is generated automatically
+http_port: 9315
+rpc_port: 9314
+
+dag:
+    #Op resource type: True for the thread model, False for the process model
+    is_thread_op: False
+op:
+    rec:
+        #Concurrency: thread-level when is_thread_op=True, otherwise process-level
+        concurrency: 1
+
+        #When an op config has no server_endpoints, the local service config is read from local_service_conf
+        local_service_conf:
+
+            #Model path
+            model_config: ./general_PPLCNet_x2_5_lite_v1.0_serving
+
+            #Compute device type: if unset, decided by devices (CPU/GPU); 0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu
+            device_type: 1
+
+            #Compute device IDs: "" or omitted runs on CPU; "0" or "0,1,2" selects the GPU cards to use
+            devices: "0" # "0,1"
+
+            #Client type: brpc, grpc, or local_predictor; local_predictor predicts in-process without starting a Serving service
+            client_type: local_predictor
+
+            #Fetch list; entries must use the alias_name of fetch_var in the client config
+            fetch_list: ["save_infer_model/scale_0.tmp_1"]
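A minimal sanity check for this config, assuming only PyYAML (the server performs its own validation at startup; this just mirrors the constraints stated in the comments: the two ports must not both be empty, and `fetch_list` must be non-empty):

```python
# Illustrative config check for config_onlyrec.yml (assumes PyYAML is installed).
import yaml

with open("config_onlyrec.yml") as f:
    conf = yaml.safe_load(f)

assert conf.get("rpc_port") or conf.get("http_port"), \
    "rpc_port and http_port must not both be empty"
rec = conf["op"]["rec"]["local_service_conf"]
assert rec["fetch_list"], "fetch_list must not be empty"
print("worker_num:", conf["worker_num"], "http_port:", conf["http_port"])
```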
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/__model__ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/__model__
new file mode 100644
index 000000000..f19c502ac
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/__model__ differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/__params__ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/__params__
new file mode 100644
index 000000000..014e7c221
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/__params__ differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
new file mode 100644
index 000000000..d04a11f9a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
@@ -0,0 +1,16 @@
+feed_var {
+ name: "x"
+ alias_name: "x"
+ is_lod_tensor: false
+ feed_type: 1
+ shape: 3
+ shape: 224
+ shape: 224
+}
+fetch_var {
+ name: "save_infer_model/scale_0.tmp_1"
+ alias_name: "feature"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
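This prototxt fixes the I/O contract for the recognition model: one float32 input `x` of shape (3, 224, 224) per image (feed_type 1 corresponds to float32 in Serving's encoding), and one variable-length output whose alias is `feature`. A sketch of a feed that satisfies the declared shape, mirroring the (224, 224) resize done in `RecOp` below:

```python
# Build a dummy feed matching feed_var "x": float32, CHW (3, 224, 224), batched.
import numpy as np

img = np.random.rand(3, 224, 224).astype("float32")  # one preprocessed image
batch = img[np.newaxis, :]                            # shape (1, 3, 224, 224)
assert batch.dtype == np.float32 and batch.shape[1:] == (3, 224, 224)
```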
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.stream.prototxt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.stream.prototxt
new file mode 100644
index 000000000..3c7ee64b1
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.stream.prototxt differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/index_result/id_map.pkl b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/index_result/id_map.pkl
new file mode 100644
index 000000000..f90dc2875
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/index_result/id_map.pkl differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/index_result/vector.index b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/index_result/vector.index
new file mode 100644
index 000000000..cb1981828
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/index_result/vector.index differ
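`vector.index` and `id_map.pkl` form the retrieval gallery that `RecOp.init_op` loads via `faiss.read_index` and `pickle` in the service script below. The gallery-building script itself is not part of this diff; a minimal sketch of how such a pair is typically produced, assuming L2-normalized features and an inner-product index (feature dimension and labels here are stand-ins):

```python
# Sketch: build a (vector.index, id_map.pkl) pair like the committed files above.
# Features are L2-normalized so inner-product search equals cosine similarity.
import os
import pickle

import faiss
import numpy as np

os.makedirs("index_result", exist_ok=True)
feats = np.random.rand(10, 512).astype("float32")       # stand-in gallery features
feats /= np.linalg.norm(feats, axis=1, keepdims=True)   # normalize for IP search

index = faiss.IndexFlatIP(feats.shape[1])
index.add(feats)
faiss.write_index(index, "index_result/vector.index")

id_map = {i: "label_%d" % i for i in range(len(feats))} # row id -> gallery label
with open("index_result/id_map.pkl", "wb") as f:
    pickle.dump(id_map, f)
```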
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/pipeline_http_client.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/pipeline_http_client.py
new file mode 100644
index 000000000..dc2b8b456
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/pipeline_http_client.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# from paddle_serving_server.pipeline import PipelineClient
+import numpy as np
+import requests
+import json
+import cv2
+import base64
+import os
+from time import time
+import threading
+
+
+def demo(url, data, i):
+    begin_time = time()
+    r = requests.post(url=url, data=json.dumps(data))
+    end_time = time()
+    run_time = end_time - begin_time
+    print('thread %d took %f s' % (i, run_time))
+    print(r.json())
+
+
+def cv2_to_base64(image):
+ return base64.b64encode(image).decode('utf8')
+
+url = "http://127.0.0.1:9315/recognition/prediction"
+with open(os.path.join(".", "test.jpg"), 'rb') as file:
+ image_data1 = file.read()
+image = cv2_to_base64(image_data1)
+
+for i in range(1):
+ data = {"key": ["image"], "value": [image]}
+ r = requests.post(url=url, data=json.dumps(data))
+ print(r.json())
+ #t = threading.Thread(target=demo, args=(url,data,i,))
+ #t.start()
+
+
+
+
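The main loop above sends a single synchronous request; the commented-out lines sketch a threaded load test through `demo()`. Enabled, it would look roughly like this (the thread count is arbitrary; `url`, `data`, `demo`, and the `threading` import are the names defined in the script above):

```python
# Hypothetical threaded load test using the script's demo() helper.
threads = []
for i in range(4):
    t = threading.Thread(target=demo, args=(url, data, i))
    t.start()
    threads.append(t)
for t in threads:
    t.join()  # wait for all requests to finish
```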
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/recognition_web_service_onlyrec.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/recognition_web_service_onlyrec.py
new file mode 100644
index 000000000..2710ffb0a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/recognition_web_service_onlyrec.py
@@ -0,0 +1,220 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from paddle_serving_server.web_service import WebService, Op
+import logging
+import numpy as np
+import sys
+import cv2
+from paddle_serving_app.reader import *
+import base64
+import os
+import faiss
+import pickle
+import json
+
+
+class DetOp(Op):
+ def init_op(self):
+        self.img_preprocess = Sequential([
+            Div(255.0),
+            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),
+            Transpose((2, 0, 1))
+        ])
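+        # Div(255.0) maps pixels to [0, 1]; Normalize applies the ImageNet
+        # mean/std; Transpose converts HWC to CHW for the detector input.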
+
+ self.img_postprocess = RCNNPostprocess("label_list.txt", "output")
+ self.threshold = 0.3
+ self.max_det_results = 5
+
+    def Deresize(self, im, im_scale_x, im_scale_y):
+        # Rescale by the precomputed per-axis factors; interpolation=2 is
+        # cv2.INTER_CUBIC.
+        im = cv2.resize(
+            im,
+            None,
+            None,
+            fx=im_scale_x,
+            fy=im_scale_y,
+            interpolation=2)
+        return im
+
+    def generate_scale(self, im):
+        """
+        Args:
+            im (np.ndarray): input image
+        Returns:
+            im_scale_y: the resize ratio of Y (height)
+            im_scale_x: the resize ratio of X (width)
+        """
+        target_size = [416, 416]
+        origin_shape = im.shape[:2]
+        resize_h, resize_w = target_size
+        im_scale_y = resize_h / float(origin_shape[0])
+        im_scale_x = resize_w / float(origin_shape[1])
+        return im_scale_y, im_scale_x
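+    # Example: a 720x1280 (HxW) frame against the 416x416 target gives
+    # im_scale_y = 416/720 ~= 0.578 and im_scale_x = 416/1280 = 0.325.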
+
+ def preprocess(self, input_dicts, data_id, log_id):
+ (_, input_dict), = input_dicts.items()
+ imgs = []
+ raw_imgs = []
+ for key in input_dict.keys():
+ data = base64.b64decode(input_dict[key].encode('utf8'))
+ raw_imgs.append(data)
+            data = np.frombuffer(data, np.uint8)
+            raw_im = cv2.imdecode(data, cv2.IMREAD_COLOR)[:, :, ::-1]  # BGR -> RGB
+ im_scale_y, im_scale_x = self.generate_scale(raw_im)
+ raw_im = self.Deresize(raw_im, im_scale_x, im_scale_y)
+ im = self.img_preprocess(raw_im)
+ imgs.append({
+ "image": im[np.newaxis, :],
+ "im_shape":
+ np.array(list(im.shape[1:])).reshape(-1)[np.newaxis, :],
+ "scale_factor":
+ np.array([[im_scale_y, im_scale_x]]).astype('float32'),
+ })
+ self.raw_img = raw_imgs
+
+ feed_dict = {
+ "image": np.concatenate(
+ [x["image"] for x in imgs], axis=0),
+ "im_shape": np.concatenate(
+ [x["im_shape"] for x in imgs], axis=0),
+ "scale_factor": np.concatenate(
+ [x["scale_factor"] for x in imgs], axis=0)
+ }
+ #print("feed_dict",feed_dict)
+ return feed_dict, False, None, ""
+
+ def postprocess(self, input_dicts, fetch_dict, log_id):
+ boxes = self.img_postprocess(fetch_dict, visualize=False)
+ #print("boxes",boxes)
+ boxes.sort(key=lambda x: x["score"], reverse=True)
+ boxes = filter(lambda x: x["score"] >= self.threshold,
+ boxes[:self.max_det_results])
+ boxes = list(boxes)
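+        # Boxes arrive as [x, y, w, h]; convert to inclusive [x1, y1, x2, y2]
+        # corners, e.g. [10, 20, 50, 40] -> [10, 20, 59, 59].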
+ for i in range(len(boxes)):
+ boxes[i]["bbox"][2] += boxes[i]["bbox"][0] - 1
+ boxes[i]["bbox"][3] += boxes[i]["bbox"][1] - 1
+ result = json.dumps(boxes)
+ res_dict = {"bbox_result": result, "image": self.raw_img}
+ return res_dict, None, ""
+
+
+class RecOp(Op):
+ def init_op(self):
+ self.seq = Sequential([
+ BGR2RGB(), Resize((224, 224)), Div(255),
+ Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
+ False), Transpose((2, 0, 1))
+ ])
+
+ index_dir = "./index_result/"
+ assert os.path.exists(os.path.join(
+ index_dir, "vector.index")), "vector.index not found ..."
+ assert os.path.exists(os.path.join(
+ index_dir, "id_map.pkl")), "id_map.pkl not found ... "
+
+ self.searcher = faiss.read_index(
+ os.path.join(index_dir, "vector.index"))
+
+ with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd:
+ self.id_map = pickle.load(fd)
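+        # A minimal sketch of how such a gallery index could be built offline
+        # (assumes inner-product search over d-dimensional features; the
+        # shipped index may have been built differently):
+        #     index = faiss.IndexFlatIP(d)
+        #     index.add(gallery_feats.astype("float32"))  # shape (n, d)
+        #     faiss.write_index(index, "index_result/vector.index")
+        #     pickle.dump(id_map, open("index_result/id_map.pkl", "wb"))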
+
+        self.rec_nms_threshold = 0.05
+        self.rec_score_thres = 0.5
+        self.feature_normalize = True
+        self.return_k = 5
+
+ def preprocess(self, input_dicts, data_id, log_id):
+ (_, input_dict), = input_dicts.items()
+
+ raw_img = input_dict["image"]
+ raw_img = base64.b64decode(raw_img)
+ data = np.frombuffer(raw_img, np.uint8)
+ origin_img = cv2.imdecode(data, cv2.IMREAD_COLOR)
+        # construct batch images for rec
+ imgs = []
+ img = self.seq(origin_img)
+ imgs.append(img[np.newaxis, :].copy())
+
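+        # The feed name "x" below must match feed_var.name in the
+        # serving_server/serving_client prototxt files.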
+ input_imgs = np.concatenate(imgs, axis=0)
+ return {"x": input_imgs}, False, None, ""
+
+    def nms_to_rec_results(self, results, thresh=0.1):
+ filtered_results = []
+ x1 = np.array([r["bbox"][0] for r in results]).astype("float32")
+ y1 = np.array([r["bbox"][1] for r in results]).astype("float32")
+ x2 = np.array([r["bbox"][2] for r in results]).astype("float32")
+ y2 = np.array([r["bbox"][3] for r in results]).astype("float32")
+ scores = np.array([r["rec_scores"] for r in results])
+
+ areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+ order = scores.argsort()[::-1]
+ while order.size > 0:
+ i = order[0]
+ xx1 = np.maximum(x1[i], x1[order[1:]])
+ yy1 = np.maximum(y1[i], y1[order[1:]])
+ xx2 = np.minimum(x2[i], x2[order[1:]])
+ yy2 = np.minimum(y2[i], y2[order[1:]])
+
+ w = np.maximum(0.0, xx2 - xx1 + 1)
+ h = np.maximum(0.0, yy2 - yy1 + 1)
+ inter = w * h
+ ovr = inter / (areas[i] + areas[order[1:]] - inter)
+ inds = np.where(ovr <= thresh)[0]
+ order = order[inds + 1]
+ filtered_results.append(results[i])
+ return filtered_results
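+    # Worked example with the +1 (inclusive-pixel) convention above: boxes
+    # A = [0, 0, 9, 9] and B = [5, 5, 14, 14] each have area 100; their
+    # intersection is 5 * 5 = 25, so IoU = 25 / (100 + 100 - 25) ~= 0.143,
+    # above thresh=0.1, so the lower-scoring box is suppressed.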
+
+    def postprocess(self, input_dicts, fetch_dict, log_id, data_id=0):
+ batch_features = fetch_dict["save_infer_model/scale_0.tmp_1"]
+
+ if self.feature_normalize:
+ feas_norm = np.sqrt(
+ np.sum(np.square(batch_features), axis=1, keepdims=True))
+ batch_features = np.divide(batch_features, feas_norm)
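+        # With L2-normalized features, the inner-product scores returned by
+        # the search below are cosine similarities in [-1, 1], so
+        # rec_score_thres = 0.5 acts as a cosine cutoff (assuming the index
+        # was built with an inner-product metric).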
+
+ scores, docs = self.searcher.search(batch_features, self.return_k)
+ results = []
+ for i in range(scores.shape[0]):
+ pred = {}
+ if scores[i][0] >= self.rec_score_thres:
+ pred["rec_docs"] = self.id_map[docs[i][0]].split()[1]
+ pred["rec_scores"] = scores[i][0]
+ results.append(pred)
+
+        # Optional NMS over the rec results (disabled by default):
+        # results = self.nms_to_rec_results(results, self.rec_nms_threshold)
+ return {"result": str(results)}, None, ""
+
+
+class RecognitionService(WebService):
+ def get_pipeline_response(self, read_op):
+ #det_op = DetOp(name="det", input_ops=[read_op])
+ rec_op = RecOp(name="rec", input_ops=[read_op])
+ return rec_op
+
+
+product_recog_service = RecognitionService(name="recognition")
+product_recog_service.prepare_pipeline_config("config_onlyrec.yml")
+product_recog_service.run_service()
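+# Launch with `python3 recognition_web_service_onlyrec.py`; the service can
+# then be queried with pipeline_http_client.py (ports and routes come from
+# config_onlyrec.yml, assumed to expose /recognition/prediction).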
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/test.jpg b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/test.jpg
new file mode 100644
index 000000000..c7c19da9a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/general_PPLCNet_x2_5_lite_v1.0/test.jpg differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/index_label.txt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/index_label.txt
new file mode 100644
index 000000000..336b98473
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/index_label.txt
@@ -0,0 +1,144 @@
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15114.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15010.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15038.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15042.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15111.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15127.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15028.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/0-10036.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15033.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15109.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/0-10033.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15047.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15023.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15068.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15107.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15074.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15035.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15063.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15043.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15015.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15124.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/0-10011.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15100.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15106.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15108.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15050.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15113.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15016.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15037.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15110.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15018.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15030.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15014.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15060.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15069.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15133.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15041.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15141.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15020.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15104.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15022.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/0-10032.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15034.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15053.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15058.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15051.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15139.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15119.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15017.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15009.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15131.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15101.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15054.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15064.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15130.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15011.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15120.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15048.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15032.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15052.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/0-10017.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15123.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15073.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15055.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15062.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/0-10024.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15117.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15125.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15070.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15046.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15049.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15142.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15129.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15029.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/0-10012.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15126.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15045.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15007.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15057.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15105.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15024.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15121.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15036.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15140.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15039.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15102.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15027.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15008.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15025.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15115.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15021.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15132.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15071.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15031.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15026.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15044.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15118.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15116.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15112.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15135.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15061.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15067.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15137.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15013.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/0-10019.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15066.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15072.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/0-10030.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15056.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15059.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15122.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/0-10001.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15012.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/摩托车/15103.jpg 摩托车
+/home/aistudio/data/data128448/index_motorcycle/人/0-10528.jpg 人
+/home/aistudio/data/data128448/index_motorcycle/人/0-10504.jpg 人
+/home/aistudio/data/data128448/index_motorcycle/人/0-10501.jpg 人
+/home/aistudio/data/data128448/index_motorcycle/人/0-10195.jpg 人
+/home/aistudio/data/data128448/index_motorcycle/人/0-10377.jpg 人
+/home/aistudio/data/data128448/index_motorcycle/人/0-10259.jpg 人
+/home/aistudio/data/data128448/index_motorcycle/人/0-10285.jpg 人
+/home/aistudio/data/data128448/index_motorcycle/人/0-10201.jpg 人
+/home/aistudio/data/data128448/index_motorcycle/人/0-10182.jpg 人
+/home/aistudio/data/data128448/index_motorcycle/人/0-10522.jpg 人
+/home/aistudio/data/data128448/index_motorcycle/其他杂物/0-10112.jpg 其他杂物
+/home/aistudio/data/data128448/index_motorcycle/其他杂物/0-10015.jpg 其他杂物
+/home/aistudio/data/data128448/index_motorcycle/其他杂物/0-10013.jpg 其他杂物
+/home/aistudio/data/data128448/index_motorcycle/其他杂物/0-10073.jpg 其他杂物
+/home/aistudio/data/data128448/index_motorcycle/其他杂物/0-10020.jpg 其他杂物
+/home/aistudio/data/data128448/index_motorcycle/其他杂物/0-10071.jpg 其他杂物
+/home/aistudio/data/data128448/index_motorcycle/其他杂物/0-10078.jpg 其他杂物
+/home/aistudio/data/data128448/index_motorcycle/其他杂物/0-10055.jpg 其他杂物
+/home/aistudio/data/data128448/index_motorcycle/其他杂物/0-10072.jpg 其他杂物
+/home/aistudio/data/data128448/index_motorcycle/其他杂物/0-10014.jpg 其他杂物
+/home/aistudio/data/data128448/index_motorcycle/自行车/0-10549.jpg 自行车
+/home/aistudio/data/data128448/index_motorcycle/自行车/0-10381.jpg 自行车
+/home/aistudio/data/data128448/index_motorcycle/自行车/0-10193.jpg 自行车
+/home/aistudio/data/data128448/index_motorcycle/自行车/0.jpg 自行车
+/home/aistudio/data/data128448/index_motorcycle/自行车/0-10290.jpg 自行车
+/home/aistudio/data/data128448/index_motorcycle/自行车/0-10685.jpg 自行车
+/home/aistudio/data/data128448/index_motorcycle/自行车/0-10180.jpg 自行车
+/home/aistudio/data/data128448/index_motorcycle/自行车/0-11043.jpg 自行车
+/home/aistudio/data/data128448/index_motorcycle/自行车/0-10461.jpg 自行车
+/home/aistudio/data/data128448/index_motorcycle/自行车/0-10777.jpg 自行车
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/make_label.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/make_label.py
new file mode 100644
index 000000000..f6a563cd7
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/make_label.py
@@ -0,0 +1,14 @@
+import os
+
+# Walk each class sub-directory under root_path and write one
+# "<image path> <label>" line per image; the directory name itself (already
+# the Chinese class name, e.g. 摩托车) is used as the gallery label.
+root_path = "/home/aistudio/data/data128448/index_motorcycle/"
+dirs = os.listdir(root_path)
+with open("index_label.txt", "w") as f:
+    for label_dir in dirs:
+        path = root_path + label_dir + "/"
+        print(path)
+        filenames = os.listdir(path)
+        for filename in filenames:
+            f.write(path + filename + " " + label_dir + "\n")
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt
new file mode 100644
index 000000000..1030614c1
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt
@@ -0,0 +1,19 @@
+feed_var {
+ name: "x"
+ alias_name: "x"
+ is_lod_tensor: false
+ feed_type: 1
+ shape: 3
+ shape: 224
+ shape: 224
+}
+fetch_var {
+ name: "save_infer_model/scale_0.tmp_1"
+ alias_name: "save_infer_model/scale_0.tmp_1"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
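+# feed_type 1 denotes float32 in Paddle Serving's convention; the feed "x" is
+# a CHW tensor of shape [3, 224, 224] (batch dimension implied), matching the
+# 224x224 rec preprocessing in recognition_web_service_onlyrec.py.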
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.stream.prototxt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.stream.prototxt
new file mode 100644
index 000000000..3c7ee64b1
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.stream.prototxt differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer.tar b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer.tar
new file mode 100644
index 000000000..19129c3cf
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer.tar differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams
new file mode 100644
index 000000000..014e7c221
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams.info b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams.info
new file mode 100644
index 000000000..4b645bfe1
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdiparams.info differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdmodel b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdmodel
new file mode 100644
index 000000000..a444c2daa
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.pdmodel differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/__model__ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/__model__
new file mode 100644
index 000000000..49962238d
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/__model__ differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/__params__ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/__params__
new file mode 100644
index 000000000..014e7c221
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/__params__ differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
new file mode 100644
index 000000000..1030614c1
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt
@@ -0,0 +1,16 @@
+feed_var {
+ name: "x"
+ alias_name: "x"
+ is_lod_tensor: false
+ feed_type: 1
+ shape: 3
+ shape: 224
+ shape: 224
+}
+fetch_var {
+ name: "save_infer_model/scale_0.tmp_1"
+ alias_name: "save_infer_model/scale_0.tmp_1"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.stream.prototxt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.stream.prototxt
new file mode 100644
index 000000000..3c7ee64b1
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/models/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.stream.prototxt differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/PipelineServingLogs/pipeline.log b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/PipelineServingLogs/pipeline.log
new file mode 100644
index 000000000..073f54453
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/PipelineServingLogs/pipeline.log
@@ -0,0 +1,8272 @@
+WARNING 2021-12-29 02:45:16,604 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 02:45:16,604 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 02:45:16,604 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 02:45:16,604 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 02:45:16,606 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 02:45:16,606 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 02:45:16,606 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 02:45:16,606 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 02:45:16,606 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 02:45:16,606 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 02:45:16,607 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 02:45:16,607 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 02:45:16,607 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 02:45:16,607 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18082,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9998,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 02:45:16,607 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 02:45:16,607 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 02:45:16,631 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 02:45:16,632 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 02:45:16,632 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 02:45:16,632 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 02:45:16,677 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 02:45:16,681 [dag.py:816] [DAG] start
+INFO 2021-12-29 02:45:16,682 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 02:45:16,688 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 02:45:16,710 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 02:45:16,710 [operator.py:1163] Init cuda env in process 0
+INFO 2021-12-29 02:45:16,710 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 02:45:17,939 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 02:45:19,138 [operator.py:1174] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 02:45:44,185 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 02:45:44,187 [operator.py:1422] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 02:45:44,188 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 02:45:45,873 [operator.py:969] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 965, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 76, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 429, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 02:45:45,877 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 03:07:14,510 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 03:07:14,510 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:07:14,510 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 03:07:14,512 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 03:07:14,512 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 03:07:14,512 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 03:07:14,512 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 03:07:14,512 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 03:07:14,512 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 03:07:14,513 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 03:07:14,513 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 03:07:14,513 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 03:07:14,513 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18082,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9998,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 03:07:14,513 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 03:07:14,513 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 03:07:14,538 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 03:07:14,539 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 03:07:14,539 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 03:07:14,539 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 03:07:14,585 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 03:07:14,589 [dag.py:816] [DAG] start
+INFO 2021-12-29 03:07:14,589 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 03:07:14,595 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 03:07:14,617 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 03:07:14,617 [operator.py:1163] Init cuda env in process 0
+INFO 2021-12-29 03:07:14,618 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 03:07:15,847 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 03:07:17,038 [operator.py:1174] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 03:07:20,880 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 03:07:20,882 [operator.py:1422] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 03:07:20,882 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 03:07:22,696 [operator.py:969] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 965, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 77, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 429, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 03:07:22,700 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 03:10:13,372 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 03:10:13,375 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 03:10:13,375 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 03:10:13,375 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 03:10:13,375 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 03:10:13,375 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18082,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9998,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 03:10:13,375 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 03:10:13,375 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 03:10:13,393 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 03:10:13,393 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 03:10:13,394 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 03:10:13,394 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 03:10:13,436 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 03:10:13,439 [dag.py:816] [DAG] start
+INFO 2021-12-29 03:10:13,440 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 03:10:13,445 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 03:10:13,468 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 03:10:13,469 [operator.py:1163] Init cuda env in process 0
+INFO 2021-12-29 03:10:13,469 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 03:10:14,628 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 03:10:15,826 [operator.py:1174] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 03:10:19,409 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 03:10:19,411 [operator.py:1422] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 03:10:19,411 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 03:10:21,260 [operator.py:969] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 965, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 78, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 429, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 03:10:21,264 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 03:11:47,325 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 03:11:47,325 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 03:11:47,325 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 03:11:47,325 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 03:11:47,325 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 03:11:47,326 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18082,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9998,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 03:11:47,326 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 03:11:47,326 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 03:11:47,348 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 03:11:47,349 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 03:11:47,349 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 03:11:47,349 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 03:11:47,395 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 03:11:47,398 [dag.py:816] [DAG] start
+INFO 2021-12-29 03:11:47,399 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 03:11:47,403 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 03:11:47,431 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 03:11:47,432 [operator.py:1163] Init cuda env in process 0
+INFO 2021-12-29 03:11:47,432 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 03:11:48,697 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 03:11:49,910 [operator.py:1174] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 03:11:53,938 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 03:11:53,939 [operator.py:1422] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 03:11:53,940 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 03:11:55,757 [operator.py:969] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 965, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 78, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 429, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 03:11:55,761 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 05:35:58,321 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 05:35:58,321 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 05:35:58,321 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 05:35:58,321 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 05:35:58,323 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 05:35:58,323 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 05:35:58,323 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 05:35:58,323 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+WARNING 2021-12-29 05:37:04,889 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 05:37:04,889 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 05:37:04,889 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 05:37:04,891 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 05:37:04,891 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 05:37:04,891 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 05:37:04,891 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 05:37:04,891 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 05:37:04,891 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 05:37:04,892 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 05:37:04,892 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 05:37:04,892 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 05:37:04,892 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 05:37:04,892 [pipeline_server.py:212] -------------------------------------------
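
The wall of `[CONF] ... not set, use default` warnings above is expected: every key absent from the pipeline's configuration file (conventionally `config.yml`; the exact name here is an assumption) falls back to a built-in default, and the effective configuration is then echoed as the JSON dump. A minimal config that would reproduce this dump might look as follows; only the keys visible in the dump are set, everything else is left to defaults:

```yaml
# hypothetical config.yml matching the dumped effective configuration
rpc_port: 9999
http_port: 18083
worker_num: 20
dag:
  is_thread_op: false
  tracer:
    interval_s: 30
op:
  ppyolo_mbv3:
    concurrency: 1
    local_service_conf:
      client_type: local_predictor
      device_type: 2          # 2 selects GPU with TensorRT here (cf. the local_service_handler line: use_gpu:True, use_trt:True)
      devices: "0"
      fetch_list: ["save_infer_model/scale_0.tmp_1"]
      model_config: serving_server/
```
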
+INFO 2021-12-29 05:37:04,892 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 05:37:04,915 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 05:37:04,916 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 05:37:04,916 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 05:37:04,916 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 05:37:04,962 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 05:37:04,967 [dag.py:816] [DAG] start
+INFO 2021-12-29 05:37:04,968 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 05:37:04,974 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 05:37:04,992 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 05:37:04,993 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 05:37:04,993 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 05:37:06,170 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 05:37:07,358 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 05:37:14,620 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 05:37:14,621 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 05:37:14,621 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 05:37:16,537 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 77, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 430, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 05:37:16,542 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
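
This first failure is in postprocessing: `_get_bbox_result` in `paddle_serving_app` indexes `fetch_map[fetch_name + '.lod']`, i.e. it expects the detection output to arrive with LoD (variable-length segment) information, but the `fetch_dict` produced by the local predictor contains only the plain tensor. One plausible workaround, assuming a single image per request, is to synthesize the missing segment before handing the dict to `img_postprocess`:

```python
import numpy as np

FETCH_NAME = "save_infer_model/scale_0.tmp_1"

def ensure_lod(fetch_dict, fetch_name=FETCH_NAME):
    """Synthesize '<name>.lod' if the predictor did not return it.

    A sketch, not the official fix: it assumes one image per request,
    so a single segment [0, num_boxes] covers every returned box.
    """
    lod_key = fetch_name + ".lod"
    if lod_key not in fetch_dict:
        num_boxes = fetch_dict[fetch_name].shape[0]
        fetch_dict[lod_key] = np.array([0, num_boxes], dtype="int32")
    return fetch_dict
```

Calling `ensure_lod(fetch_dict)` at the top of `postprocess` in `web_service.py` would let `_get_bbox_result` proceed; whether the resulting boxes are grouped correctly for batched requests would still need checking.
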
+WARNING 2021-12-29 06:08:54,355 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 06:08:54,355 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 06:08:54,355 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 06:08:54,356 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 06:08:54,356 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 06:08:54,356 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 06:08:54,356 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 06:08:54,356 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 06:08:54,356 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 06:08:54,356 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 06:08:54,356 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 06:08:54,357 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 06:08:54,357 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 06:08:54,357 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 06:08:54,357 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 06:08:54,357 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 06:08:54,357 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 06:08:54,357 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 06:08:54,358 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 06:08:54,358 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 06:08:54,358 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 06:08:54,358 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 06:08:54,358 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 06:08:54,358 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 06:08:54,380 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 06:08:54,381 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 06:08:54,381 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 06:08:54,381 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 06:08:54,427 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 06:08:54,431 [dag.py:816] [DAG] start
+INFO 2021-12-29 06:08:54,432 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 06:08:54,438 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 06:08:54,460 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 06:08:54,460 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 06:08:54,461 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 06:08:55,641 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+CRITICAL 2021-12-29 06:08:56,841 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: name 'yaml' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 33, in init_op
+ yml_conf = yaml.safe_load(f)
+NameError: name 'yaml' is not defined
+INFO 2021-12-29 06:09:00,289 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 06:09:00,290 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 06:09:00,291 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
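
After the restart at 06:08 the failure moves earlier, into `init_op`: `web_service.py` calls `yaml.safe_load(f)` without ever importing `yaml`, so the op dies during initialization and the subsequent request merely queues against a dead op. The fix is a one-line import at the top of the file; a minimal sketch (the config filename is an assumption):

```python
import yaml  # the missing import behind "NameError: name 'yaml' is not defined"

def load_infer_cfg(path="infer_cfg.yml"):  # assumed config filename
    """Load the exported preprocessing config that init_op parses."""
    with open(path) as f:
        return yaml.safe_load(f)
```
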
+WARNING 2021-12-29 06:12:08,931 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 06:12:08,931 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 06:12:08,931 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 06:12:08,932 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 06:12:08,932 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 06:12:08,932 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 06:12:08,932 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 06:12:08,932 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 06:12:08,932 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 06:12:08,932 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 06:12:08,932 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 06:12:08,932 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 06:12:08,933 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 06:12:08,933 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 06:12:08,933 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 06:12:08,933 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 06:12:08,933 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 06:12:08,933 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 06:12:08,934 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 06:12:08,934 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 06:12:08,934 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 06:12:08,934 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 06:12:08,934 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 06:12:08,934 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 06:12:08,958 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 06:12:08,959 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 06:12:08,959 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 06:12:08,959 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 06:12:09,004 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 06:12:09,008 [dag.py:816] [DAG] start
+INFO 2021-12-29 06:12:09,009 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 06:12:09,016 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 06:12:09,032 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 06:12:09,032 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 06:12:09,032 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 06:12:10,227 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+CRITICAL 2021-12-29 06:12:11,443 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: __init__() got an unexpected keyword argument 'interp'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 40, in init_op
+ self.preprocess_ops.append(eval(op_type)(**new_op_info))
+TypeError: __init__() got an unexpected keyword argument 'interp'
+INFO 2021-12-29 06:12:12,867 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 06:12:12,868 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 06:12:12,869 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
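
With the import fixed, initialization now trips one step later: `init_op` instantiates each preprocess op via `eval(op_type)(**new_op_info)`, feeding it the keyword arguments recorded in the exported PaddleDetection config, but the serving-side class (presumably `Resize` from `paddle_serving_app.reader`) does not declare `interp`. A defensive sketch, under the assumption that the extra keys are safe to ignore, is to filter the config down to the parameters the target class actually accepts:

```python
import inspect

def make_preprocess_op(op_cls, op_info):
    """Instantiate a preprocess op, dropping config keys (such as
    'interp') that op_cls.__init__ does not declare."""
    accepted = inspect.signature(op_cls.__init__).parameters
    kwargs = {k: v for k, v in op_info.items() if k in accepted}
    return op_cls(**kwargs)

# in init_op, replace eval(op_type)(**new_op_info) with:
#   self.preprocess_ops.append(make_preprocess_op(eval(op_type), new_op_info))
```

Alternatively, the unsupported keys could simply be deleted from the exported yml config itself.
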
+[... 06:18:17 restart: the [CONF] "not set, use default" warnings, local_service_conf dump, PIPELINE SERVER config JSON, DAG build, and LocalPredictor load repeat unchanged from the start above; only timestamps differ ...]
+CRITICAL 2021-12-29 06:18:19,908 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: __init__() got an unexpected keyword argument 'interp'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 45, in init_op
+ self.preprocess_ops.append(eval(op_type)(**new_op_info))
+TypeError: __init__() got an unexpected keyword argument 'interp'
+[... 06:19:57 restart: identical startup sequence; only timestamps differ ...]
+CRITICAL 2021-12-29 06:20:00,415 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: name 'preprocess_ops' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 45, in init_op
+ preprocess_ops.append(eval(op_type)(**new_op_info))
+NameError: name 'preprocess_ops' is not defined
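The error has changed: the `self.` prefix was dropped from `self.preprocess_ops.append(...)` between restarts, but no local `preprocess_ops` list is ever bound, so the first append raises NameError. The shifting web_service.py line numbers in the repeated tracebacks that follow (46, 46, 47, 49) show the file being edited between restarts, and the op finally logs "Succ init" at 06:32:33 once the binding exists. A minimal sketch of the fixed loop, assuming each config entry carries a `"type"` key plus kwargs (the entry shape and the `op_registry` name are assumptions):

```python
# Hedged sketch of init_op after the fix: bind the list before the loop.
def init_op(op_config_list, op_registry):
    preprocess_ops = []                    # the missing binding behind the NameError
    for op_info in op_config_list:
        new_op_info = dict(op_info)        # copy so the original config stays intact
        op_type = new_op_info.pop("type")  # e.g. "Resize"
        preprocess_ops.append(op_registry[op_type](**new_op_info))
    return preprocess_ops
```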
+[... 06:21:28 restart: identical startup sequence; only timestamps differ ...]
+CRITICAL 2021-12-29 06:21:31,123 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: name 'preprocess_ops' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 46, in init_op
+ preprocess_ops.append(eval(op_type)(**new_op_info))
+NameError: name 'preprocess_ops' is not defined
+[... 06:25:12 restart: identical startup sequence; only timestamps differ ...]
+CRITICAL 2021-12-29 06:25:14,695 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: name 'preprocess_ops' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 46, in init_op
+ preprocess_ops.append(eval(op_type)(**new_op_info))
+NameError: name 'preprocess_ops' is not defined
+[... 06:25:29 restart: identical startup sequence; only timestamps differ ...]
+CRITICAL 2021-12-29 06:25:31,935 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: name 'preprocess_ops' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 47, in init_op
+ preprocess_ops.append(eval(op_type)(**new_op_info))
+NameError: name 'preprocess_ops' is not defined
+[... 06:31:43 restart: identical startup sequence; only timestamps differ ...]
+CRITICAL 2021-12-29 06:31:45,962 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: name 'preprocess_ops' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 49, in init_op
+ preprocess_ops.append(eval(op_type)(**new_op_info))
+NameError: name 'preprocess_ops' is not defined
+[... 06:32:31 restart: identical startup sequence; only timestamps differ ...]
+INFO 2021-12-29 06:32:33,520 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+[... 06:33:05 restart: identical startup sequence; only timestamps differ ...]
+INFO 2021-12-29 06:33:07,686 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+[... 06:40:19 restart: identical startup sequence; only timestamps differ ...]
+INFO 2021-12-29 06:40:21,771 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 06:40:25,060 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 06:40:25,061 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 06:40:25,062 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 06:40:25,105 [operator.py:695] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: local variable 'im_info' referenced before assignment
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 678, in _run_preprocess
+ parsed_data, data_id, logid_dict.get(data_id))
+ File "web_service.py", line 54, in preprocess
+ im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
+UnboundLocalError: local variable 'im_info' referenced before assignment
+ERROR 2021-12-29 06:40:25,111 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: local variable 'im_info' referenced before assignment
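The server now initializes, but the first inference request (log_id=0) dies in `preprocess`: `im_info['im_shape'] = ...` item-assigns into a name that was never bound, hence the UnboundLocalError. Creating the dict before filling it resolves this; a sketch under assumptions (the `im_shape` assignment comes from the log line, while the `scale_factor` key and the function name are illustrative, based on common PP-YOLO preprocessing):

```python
import numpy as np

def preprocess_image(im):
    """im: HWC image array. Only the im_shape assignment is taken from the log."""
    im_info = {}  # the dict must exist before any item assignment
    im_info["im_shape"] = np.array(im.shape[:2], dtype=np.float32)
    im_info["scale_factor"] = np.array([1.0, 1.0], dtype=np.float32)  # assumed key
    return im, im_info
```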
+INFO 2021-12-29 06:46:25,488 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 06:46:25,490 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 06:46:25,490 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 06:46:25,581 [operator.py:695] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: operands could not be broadcast together with shapes (3,640,640) (1,1,3) (3,640,640)
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 678, in _run_preprocess
+ parsed_data, data_id, logid_dict.get(data_id))
+ File "web_service.py", line 71, in preprocess
+ im = self.img_preprocess(im)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 492, in __call__
+ img = t(img)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 642, in __call__
+ return F.normalize(img, self.mean, self.std, self.channel_first)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/functional.py", line 33, in normalize
+ img -= img_mean
+ValueError: operands could not be broadcast together with shapes (3,640,640) (1,1,3) (3,640,640)
+ERROR 2021-12-29 06:46:25,587 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: operands could not be broadcast together with shapes (3,640,640) (1,1,3) (3,640,640)
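+
+The broadcast `ValueError` above means `Normalize` ran on a CHW tensor: its per-channel mean/std broadcast as (1,1,3), which only matches an HWC image, while the input had already been transposed to (3,640,640). A sketch of a preprocessing chain that avoids this by keeping `Normalize` ahead of `Transpose` (the mean/std values are the usual ImageNet ones and are an assumption):
+
+```python
+from paddle_serving_app.reader import (BGR2RGB, Div, Normalize, Resize,
+                                       Sequential, Transpose)
+
+# Normalize before Transpose: the (1,1,3) mean/std broadcasts against an
+# HWC image; after Transpose((2, 0, 1)) the image is CHW and the same
+# subtraction fails with exactly the shapes seen in the log.
+img_preprocess = Sequential([
+    BGR2RGB(),
+    Div(255.0),
+    Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),
+    Resize((640, 640)),
+    Transpose((2, 0, 1)),
+])
+```
+
+Alternatively, if `Normalize` must run after `Transpose`, its `channel_first` flag should be set so the per-channel values broadcast along the channel axis instead.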
+INFO 2021-12-29 06:51:06,257 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 06:51:06,258 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 06:51:06,259 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 06:51:07,885 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 89, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 430, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 06:51:07,889 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
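+
+The `KeyError` above is raised because the postprocessor looks up `fetch_map['save_infer_model/scale_0.tmp_1.lod']`, but this exported model returns a plain dense tensor with no LoD entry alongside it. One workaround is to decode the fetched array directly instead of calling `img_postprocess`; the sketch below assumes the common PaddleDetection NMS output layout of one row per box, `[class_id, score, x1, y1, x2, y2]` (both the layout and the fetch name are assumptions taken from the log, not from the model's export config):
+
+```python
+import numpy as np
+
+def decode_bboxes(fetch_dict, score_threshold=0.5):
+    """Turn the raw detection tensor into a list of box dicts (sketch)."""
+    boxes = np.asarray(fetch_dict['save_infer_model/scale_0.tmp_1'])
+    results = []
+    for row in boxes.reshape(-1, 6):  # assumed layout: class, score, x1, y1, x2, y2
+        class_id, score = int(row[0]), float(row[1])
+        if score < score_threshold:
+            continue
+        results.append({'category_id': class_id,
+                        'score': score,
+                        'bbox': [float(v) for v in row[2:6]]})
+    return {'bbox_result': str(results)}
+```
+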
+WARNING 2021-12-29 06:51:56,944 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 06:51:56,945 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 06:51:56,945 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 06:51:56,945 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 06:51:56,945 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 06:51:56,945 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 06:51:56,945 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 06:51:56,945 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 06:51:56,945 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 06:51:56,946 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 06:51:56,946 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 06:51:56,946 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 06:51:56,946 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 06:51:56,946 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 06:51:56,946 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 06:51:56,946 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 06:51:56,946 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 06:51:56,947 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 06:51:56,947 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 06:51:56,947 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 06:51:56,947 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 06:51:56,948 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 06:51:56,948 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 06:51:56,948 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 06:51:56,971 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 06:51:56,972 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 06:51:56,972 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 06:51:56,972 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 06:51:57,019 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 06:51:57,023 [dag.py:816] [DAG] start
+INFO 2021-12-29 06:51:57,024 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 06:51:57,029 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 06:51:57,049 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 06:51:57,050 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 06:51:57,050 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 06:51:58,256 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 06:51:59,445 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+WARNING 2021-12-29 06:52:13,864 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 06:52:13,865 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 06:52:13,865 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 06:52:13,865 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 06:52:13,865 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 06:52:13,865 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 06:52:13,865 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 06:52:13,865 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 06:52:13,866 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 06:52:13,866 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 06:52:13,866 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 06:52:13,866 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 06:52:13,866 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 06:52:13,866 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 06:52:13,866 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 06:52:13,866 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 06:52:13,866 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 06:52:13,867 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 06:52:13,867 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 06:52:13,867 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 06:52:13,867 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 06:52:13,868 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 06:52:13,868 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 06:52:13,868 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 06:52:13,892 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 06:52:13,893 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 06:52:13,893 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 06:52:13,893 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 06:52:13,936 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 06:52:13,939 [dag.py:816] [DAG] start
+INFO 2021-12-29 06:52:13,940 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 06:52:13,945 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 06:52:13,966 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 06:52:13,966 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 06:52:13,966 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 06:52:15,154 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 06:52:16,355 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 06:52:18,677 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 06:52:18,678 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 06:52:18,679 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 06:52:20,309 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 86, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 430, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 06:52:20,314 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 06:54:00,624 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 06:54:00,624 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 06:54:00,624 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 06:54:00,625 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 06:54:00,625 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 06:54:00,625 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 06:54:00,625 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 06:54:00,625 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 06:54:00,625 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 06:54:00,625 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 06:54:00,625 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 06:54:00,625 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 06:54:00,626 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 06:54:00,626 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 06:54:00,626 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 06:54:00,626 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 06:54:00,626 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 06:54:00,626 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 06:54:00,626 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 06:54:00,627 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 06:54:00,627 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 06:54:00,627 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 06:54:00,627 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 06:54:00,627 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 06:54:00,652 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 06:54:00,653 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 06:54:00,653 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 06:54:00,653 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 06:54:00,699 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 06:54:00,703 [dag.py:816] [DAG] start
+INFO 2021-12-29 06:54:00,703 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 06:54:00,709 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 06:54:00,732 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 06:54:00,733 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 06:54:00,733 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 06:54:01,924 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 06:54:03,172 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 06:54:05,727 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 06:54:05,728 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 06:54:05,729 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 06:54:07,453 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 86, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 430, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 06:54:07,458 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 07:13:12,175 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 07:13:12,175 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:13:12,176 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 07:13:12,176 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 07:13:12,176 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 07:13:12,176 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 07:13:12,176 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:13:12,176 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 07:13:12,176 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 07:13:12,177 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 07:13:12,177 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 07:13:12,177 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 07:13:12,177 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 07:13:12,177 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 07:13:12,177 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 07:13:12,177 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 07:13:12,177 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 07:13:12,178 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 07:13:12,178 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:13:12,178 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 07:13:12,178 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 07:13:12,178 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 07:13:12,178 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 07:13:12,178 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 07:13:12,201 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 07:13:12,202 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 07:13:12,202 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 07:13:12,202 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 07:13:12,251 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 07:13:12,254 [dag.py:816] [DAG] start
+INFO 2021-12-29 07:13:12,255 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 07:13:12,260 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 07:13:12,284 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:13:12,284 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 07:13:12,285 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 07:13:13,478 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 07:13:14,670 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 07:13:18,607 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 07:13:18,609 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 07:13:18,609 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 07:13:20,265 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 87, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 430, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 07:13:20,269 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
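The traceback above shows the root cause of this first run's failure: the `img_postprocess` helper (`paddle_serving_app/reader/image_reader.py`, `_get_bbox_result`) looks up `fetch_map[fetch_name + '.lod']`, but the local predictor apparently returns only the dense NMS output tensor for `save_infer_model/scale_0.tmp_1` with no accompanying `.lod` entry, so the key is missing. Below is a minimal sketch of a postprocess that sidesteps the `.lod` lookup by decoding the dense tensor directly, assuming the standard PaddleDetection NMS layout of one row per detection (`[class_id, score, x_min, y_min, x_max, y_max]`); `fetch_name` and `score_thresh` are illustrative choices, not taken from the repo:

```python
import numpy as np

def bboxes_from_fetch(fetch_dict,
                      fetch_name="save_infer_model/scale_0.tmp_1",
                      score_thresh=0.5):
    # One row per detection: [class_id, score, x_min, y_min, x_max, y_max].
    rows = np.asarray(fetch_dict[fetch_name]).reshape(-1, 6)
    results = []
    for cls_id, score, x0, y0, x1, y1 in rows:
        if score < score_thresh:
            continue  # drop low-confidence boxes
        results.append({"category_id": int(cls_id),
                        "score": float(score),
                        "bbox": [float(x0), float(y0), float(x1), float(y1)]})
    return results
```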
+WARNING 2021-12-29 07:16:27,832 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 07:16:27,832 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:16:27,833 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 07:16:27,833 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 07:16:27,833 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 07:16:27,833 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 07:16:27,833 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:16:27,833 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 07:16:27,833 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 07:16:27,833 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 07:16:27,834 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 07:16:27,834 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 07:16:27,834 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 07:16:27,834 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 07:16:27,834 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 07:16:27,834 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 07:16:27,834 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 07:16:27,835 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 07:16:27,835 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:16:27,835 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 07:16:27,835 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 07:16:27,835 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 07:16:27,835 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 07:16:27,835 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 07:16:27,860 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 07:16:27,861 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 07:16:27,861 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 07:16:27,861 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 07:16:27,904 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 07:16:27,907 [dag.py:816] [DAG] start
+INFO 2021-12-29 07:16:27,908 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 07:16:27,915 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 07:16:27,939 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:16:27,939 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 07:16:27,939 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 07:16:29,142 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 07:16:30,333 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 07:16:32,190 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 07:16:32,192 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 07:16:32,193 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 07:16:32,263 [operator.py:695] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: name 'im_shape' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 678, in _run_preprocess
+ parsed_data, data_id, logid_dict.get(data_id))
+ File "web_service.py", line 68, in preprocess
+ "im_shape": im_info[im_shape],#np.array(list(im.shape[1:])).reshape(-1)[np.newaxis,:],
+NameError: name 'im_shape' is not defined
+ERROR 2021-12-29 07:16:32,267 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: name 'im_shape' is not defined
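This run fails earlier, in `preprocess`: `web_service.py` line 68 writes `im_info[im_shape]` with an unquoted name, so Python raises the `NameError` before any inference happens. The key must be the string `'im_shape'`, or the value can be derived from the image array itself, as the commented-out expression on that same line already suggests. A minimal fix sketch, assuming `im` is the preprocessed CHW image array and `im_info` is the dict produced by the resize/normalize transforms:

```python
import numpy as np

def build_im_shape(im, im_info=None):
    # Prefer the value recorded by the transforms; fall back to the array shape.
    if im_info is not None and "im_shape" in im_info:
        return im_info["im_shape"]
    # (H, W) of a CHW array, shaped (1, 2) as the detector's feed expects.
    return np.array(list(im.shape[1:])).reshape(-1)[np.newaxis, :].astype("float32")
```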
+WARNING 2021-12-29 07:17:33,514 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 07:17:33,514 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:17:33,515 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 07:17:33,515 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 07:17:33,515 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 07:17:33,515 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 07:17:33,515 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:17:33,515 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 07:17:33,515 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 07:17:33,515 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 07:17:33,516 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 07:17:33,516 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 07:17:33,516 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 07:17:33,516 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 07:17:33,516 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 07:17:33,516 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 07:17:33,516 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 07:17:33,517 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 07:17:33,517 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:17:33,517 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 07:17:33,517 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 07:17:33,517 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 07:17:33,517 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 07:17:33,518 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 07:17:33,542 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 07:17:33,543 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 07:17:33,543 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 07:17:33,543 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 07:17:33,585 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 07:17:33,588 [dag.py:816] [DAG] start
+INFO 2021-12-29 07:17:33,589 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 07:17:33,594 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 07:17:33,615 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:17:33,616 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 07:17:33,616 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 07:17:34,803 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 07:17:35,996 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 07:17:38,155 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 07:17:38,156 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 07:17:38,158 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 07:17:39,797 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 87, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 07:17:39,802 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 07:19:06,141 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 07:19:06,142 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:19:06,142 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 07:19:06,142 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 07:19:06,142 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 07:19:06,142 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 07:19:06,142 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:19:06,142 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 07:19:06,142 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 07:19:06,142 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 07:19:06,143 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 07:19:06,143 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 07:19:06,143 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 07:19:06,143 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 07:19:06,143 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 07:19:06,143 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 07:19:06,143 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 07:19:06,144 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 07:19:06,144 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:19:06,144 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 07:19:06,144 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 07:19:06,144 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 07:19:06,144 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 07:19:06,144 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 07:19:06,167 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 07:19:06,168 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 07:19:06,168 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 07:19:06,168 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 07:19:06,213 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 07:19:06,217 [dag.py:816] [DAG] start
+INFO 2021-12-29 07:19:06,219 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 07:19:06,224 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 07:19:06,242 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:19:06,242 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 07:19:06,243 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 07:19:07,427 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 07:19:08,615 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 07:19:11,136 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 07:19:11,137 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 07:19:11,137 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 07:19:12,763 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 87, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 07:19:12,767 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 07:42:11,629 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 07:42:11,629 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:42:11,629 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 07:42:11,630 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 07:42:11,630 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 07:42:11,630 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 07:42:11,630 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:42:11,630 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 07:42:11,630 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 07:42:11,630 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 07:42:11,630 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 07:42:11,630 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 07:42:11,630 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 07:42:11,631 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 07:42:11,631 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 07:42:11,631 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 07:42:11,631 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 07:42:11,631 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 07:42:11,631 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:42:11,632 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 07:42:11,632 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 07:42:11,632 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 07:42:11,632 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 07:42:11,634 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 07:42:11,659 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 07:42:11,660 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 07:42:11,660 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 07:42:11,660 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 07:42:11,707 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 07:42:11,711 [dag.py:816] [DAG] start
+INFO 2021-12-29 07:42:11,713 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 07:42:11,719 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 07:42:11,742 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:42:11,742 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 07:42:11,743 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 07:42:12,964 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 07:42:14,178 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 07:42:24,007 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 07:42:24,009 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 07:42:24,009 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 07:42:25,683 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 87, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 07:42:25,687 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 07:49:58,432 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 07:49:58,433 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:49:58,433 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 07:49:58,433 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 07:49:58,433 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 07:49:58,433 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 07:49:58,433 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 07:49:58,433 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 07:49:58,433 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 07:49:58,434 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 07:49:58,434 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 07:49:58,434 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 07:49:58,434 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 07:49:58,434 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 07:49:58,434 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 07:49:58,434 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 07:49:58,434 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-29 07:49:58,435 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-29 07:49:58,435 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:49:58,435 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-29 07:49:58,435 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-29 07:49:58,435 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-29 07:49:58,435 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-29 07:49:58,435 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-29 07:49:58,458 [dag.py:493] [DAG] Succ init
+INFO 2021-12-29 07:49:58,459 [dag.py:651] ================= USED OP =================
+INFO 2021-12-29 07:49:58,459 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-29 07:49:58,460 [dag.py:655] -------------------------------------------
+INFO 2021-12-29 07:49:58,505 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-29 07:49:58,509 [dag.py:816] [DAG] start
+INFO 2021-12-29 07:49:58,509 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-29 07:49:58,515 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-29 07:49:58,539 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-29 07:49:58,540 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-29 07:49:58,540 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-29 07:49:59,775 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-29 07:50:00,998 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-29 07:50:03,927 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-29 07:50:03,928 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-29 07:50:03,930 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-29 07:50:05,801 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_4.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 87, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_4.tmp_1.lod'
+ERROR 2021-12-29 07:50:05,806 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_4.tmp_1.lod'
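Widening `fetch_list` to five tensors does not help: the helper now fails on the last fetch name (`scale_4`) plus `.lod`, the same way it failed on `scale_0` before. Since this exported model returns no LoD at all, one workaround (an assumption, not a documented API) is to synthesize the entry the helper expects before calling `img_postprocess`, spanning all detection rows of a single image:

```python
import numpy as np

def add_fake_lod(fetch_dict, fetch_name="save_infer_model/scale_0.tmp_1"):
    # One image per request: lod = [0, n_boxes] covers every detection row.
    n_boxes = np.asarray(fetch_dict[fetch_name]).shape[0]
    fetch_dict[fetch_name + ".lod"] = np.array([0, n_boxes], dtype=np.int32)
    return fetch_dict
```

Called at the top of `postprocess`, this would let the existing `self.img_postprocess(fetch_dict, visualize=False)` line run unchanged.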
+WARNING 2021-12-30 06:53:16,236 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 06:53:16,237 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 06:53:16,237 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 06:53:16,237 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 06:53:16,237 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 06:53:16,237 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 06:53:16,237 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 06:53:16,237 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 06:53:16,237 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 06:53:16,238 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 06:53:16,238 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 06:53:16,238 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 06:53:16,238 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 06:53:16,238 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 06:53:16,238 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 06:53:16,238 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 06:53:16,238 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 06:53:16,239 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 06:53:16,239 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 06:53:16,239 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 06:53:16,239 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 06:53:16,239 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 06:53:16,239 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 06:53:16,239 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 06:53:16,265 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 06:53:16,266 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 06:53:16,266 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 06:53:16,266 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 06:53:16,310 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 06:53:16,315 [dag.py:816] [DAG] start
+INFO 2021-12-30 06:53:16,316 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 06:53:16,323 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 06:53:16,341 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 06:53:16,341 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 06:53:16,342 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 06:53:17,563 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 06:53:18,787 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 06:53:26,765 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 06:53:26,766 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 06:53:26,766 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 06:53:28,471 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_3.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 81, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_3.tmp_1.lod'
+ERROR 2021-12-30 06:53:28,476 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_3.tmp_1.lod'
+WARNING 2021-12-30 07:57:06,805 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 07:57:06,805 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 07:57:06,805 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 07:57:06,805 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 07:57:06,805 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 07:57:06,806 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 07:57:06,806 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 07:57:06,806 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 07:57:06,806 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 07:57:06,806 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 07:57:06,806 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 07:57:06,806 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 07:57:06,806 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 07:57:06,806 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 07:57:06,806 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 07:57:06,807 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 07:57:06,807 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 07:57:06,807 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 07:57:06,807 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 07:57:06,807 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 07:57:06,807 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 07:57:06,808 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 07:57:06,808 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 07:57:06,808 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 07:57:06,831 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 07:57:06,832 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 07:57:06,832 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 07:57:06,832 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 07:57:06,878 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 07:57:06,882 [dag.py:816] [DAG] start
+INFO 2021-12-30 07:57:06,883 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 07:57:06,890 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 07:57:06,909 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 07:57:06,910 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 07:57:06,910 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 07:57:08,117 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 07:57:09,361 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 07:57:13,486 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 07:57:13,487 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 07:57:13,487 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 07:57:15,249 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 81, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 07:57:15,253 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
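Taken together, these runs confirm the diagnosis: whatever `fetch_list` contains, the failing key is always the last fetch name plus `.lod` (`scale_0` alone, then `scale_4` with five entries, `scale_3` with four, `scale_7` with eight). Reshuffling the fetch list cannot supply a LoD entry the predictor never returns; the fix belongs in `postprocess` (decode the dense tensor or synthesize the LoD, as sketched above) or in re-exporting the model so its bbox output carries LoD information.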
+WARNING 2021-12-30 08:12:07,133 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 08:12:07,133 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:12:07,133 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 08:12:07,133 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 08:12:07,133 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 08:12:07,133 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 08:12:07,133 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:12:07,134 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 08:12:07,134 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 08:12:07,134 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 08:12:07,134 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 08:12:07,134 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 08:12:07,134 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 08:12:07,134 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 08:12:07,134 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 08:12:07,134 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 08:12:07,134 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 08:12:07,135 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 08:12:07,135 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:12:07,135 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 08:12:07,135 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 08:12:07,135 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 08:12:07,135 [pipeline_server.py:212] -------------------------------------------
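For readability: the JSON dump the server prints between the `PIPELINE SERVER` banners is just its parsed configuration with defaults filled in. A `config.yml` that would produce this exact dump looks roughly as follows (all values copied from the dump itself; in Paddle Serving's convention `device_type: 2` selects GPU with TensorRT, which matches `use_trt:True` in the launch line above):

```yaml
# Reconstructed from the PIPELINE SERVER dump above; not the author's file.
worker_num: 20
http_port: 18083
rpc_port: 9999
dag:
    is_thread_op: false        # run the op in a separate process, not a thread
    tracer:
        interval_s: 30
op:
    ppyolo_mbv3:
        concurrency: 1
        local_service_conf:
            client_type: local_predictor
            device_type: 2     # 2 = GPU + TensorRT
            devices: "0"
            model_config: serving_server/
            fetch_list:
                - save_infer_model/scale_0.tmp_1
                - save_infer_model/scale_1.tmp_1
                - save_infer_model/scale_2.tmp_1
                - save_infer_model/scale_3.tmp_1
                - save_infer_model/scale_4.tmp_1
                - save_infer_model/scale_5.tmp_1
                - save_infer_model/scale_6.tmp_1
                - save_infer_model/scale_7.tmp_1
```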
+INFO 2021-12-30 08:12:07,136 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 08:12:07,160 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 08:12:07,161 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 08:12:07,161 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 08:12:07,161 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 08:12:07,206 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 08:12:07,210 [dag.py:816] [DAG] start
+INFO 2021-12-30 08:12:07,211 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 08:12:07,218 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 08:12:07,237 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:12:07,237 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 08:12:07,237 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 08:12:08,426 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 08:12:09,638 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 08:12:13,041 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 08:12:13,042 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 08:12:13,043 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 08:12:14,700 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 82, in postprocess
+ np_score_list.append(fetch_dict[out_idx])
+KeyError: 0
+ERROR 2021-12-30 08:12:14,707 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
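This second failure mode (after `web_service.py` was edited; the traceback now points at line 82) is the mirror image of the first: `fetch_dict` is keyed by the output tensor *names* from the fetch_list, never by integer position, so `fetch_dict[out_idx]` with `out_idx = 0` raises `KeyError: 0`. Below is a sketch of the loop written against names instead of indices, assuming a PicoDet-style layout in which the first half of the heads are score maps and the second half box maps — an assumption, since the log does not say which head is which:

```python
# Sketch only: index fetch_dict by tensor name, not by position.
fetch_names = ["save_infer_model/scale_%d.tmp_1" % i for i in range(8)]

np_score_list, np_boxes_list = [], []
for idx, name in enumerate(fetch_names):
    if idx < len(fetch_names) // 2:   # assumed: first half = score maps
        np_score_list.append(fetch_dict[name])
    else:                             # assumed: second half = box maps
        np_boxes_list.append(fetch_dict[name])
```

The same `KeyError: 0` recurs in the 08:13, 08:17 and 08:20 cycles below with `fetch_dict[i]`; renaming the loop variable does not change the lookup, because the keys are still the string tensor names.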
+WARNING 2021-12-30 08:13:45,672 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 08:13:45,673 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:13:45,673 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 08:13:45,673 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 08:13:45,673 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 08:13:45,673 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 08:13:45,673 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:13:45,673 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 08:13:45,673 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 08:13:45,674 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 08:13:45,674 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 08:13:45,674 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 08:13:45,674 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 08:13:45,674 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 08:13:45,674 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 08:13:45,674 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 08:13:45,674 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 08:13:45,675 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 08:13:45,675 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:13:45,675 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 08:13:45,675 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 08:13:45,675 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 08:13:45,675 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 08:13:45,675 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 08:13:45,700 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 08:13:45,701 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 08:13:45,701 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 08:13:45,701 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 08:13:45,748 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 08:13:45,751 [dag.py:816] [DAG] start
+INFO 2021-12-30 08:13:45,752 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 08:13:45,758 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 08:13:45,779 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:13:45,780 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 08:13:45,781 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 08:13:46,997 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 08:13:48,252 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 08:13:50,562 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 08:13:50,562 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 08:13:50,563 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 08:13:52,217 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 83, in postprocess
+ np_score_list.append(fetch_dict[out_idx])
+KeyError: 0
+ERROR 2021-12-30 08:13:52,220 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+WARNING 2021-12-30 08:17:15,481 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 08:17:15,481 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:17:15,481 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 08:17:15,481 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 08:17:15,481 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 08:17:15,481 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 08:17:15,481 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:17:15,482 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 08:17:15,482 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 08:17:15,482 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 08:17:15,482 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 08:17:15,482 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 08:17:15,482 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 08:17:15,482 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 08:17:15,482 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 08:17:15,482 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 08:17:15,482 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 08:17:15,483 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 08:17:15,483 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:17:15,483 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 08:17:15,483 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 08:17:15,483 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 08:17:15,483 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 08:17:15,484 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 08:17:15,508 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 08:17:15,509 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 08:17:15,509 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 08:17:15,509 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 08:17:15,556 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 08:17:15,560 [dag.py:816] [DAG] start
+INFO 2021-12-30 08:17:15,560 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 08:17:15,567 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 08:17:15,589 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:17:15,589 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 08:17:15,590 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 08:17:16,807 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 08:17:18,022 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 08:17:20,506 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 08:17:20,508 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 08:17:20,509 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 08:17:22,212 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 83, in postprocess
+ np_score_list.append(fetch_dict[i])
+KeyError: 0
+ERROR 2021-12-30 08:17:22,217 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+WARNING 2021-12-30 08:17:30,895 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 08:17:30,898 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:17:30,898 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 08:17:30,898 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 08:17:30,898 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 08:17:30,898 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 08:17:30,898 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:17:30,899 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 08:17:30,899 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 08:17:30,899 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 08:17:30,899 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 08:17:30,899 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 08:17:30,899 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 08:17:30,899 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 08:17:30,899 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 08:17:30,899 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 08:17:30,899 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 08:17:30,900 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 08:17:30,900 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:17:30,900 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 08:17:30,900 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 08:17:30,900 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 08:17:30,901 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 08:17:30,901 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 08:17:30,924 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 08:17:30,925 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 08:17:30,925 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 08:17:30,925 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 08:17:30,971 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 08:17:30,976 [dag.py:816] [DAG] start
+INFO 2021-12-30 08:17:30,977 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 08:17:30,983 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 08:17:31,007 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:17:31,008 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 08:17:31,008 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 08:17:32,203 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 08:17:33,418 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 08:17:37,462 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 08:17:37,464 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 08:17:37,464 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 08:17:39,176 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 83, in postprocess
+ np_score_list.append(fetch_dict[i])
+KeyError: 0
+ERROR 2021-12-30 08:17:39,180 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+WARNING 2021-12-30 08:20:13,195 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 08:20:13,196 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:20:13,196 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 08:20:13,196 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 08:20:13,196 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 08:20:13,196 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 08:20:13,196 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:20:13,196 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 08:20:13,196 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 08:20:13,196 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 08:20:13,197 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 08:20:13,197 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 08:20:13,197 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 08:20:13,197 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 08:20:13,197 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 08:20:13,197 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 08:20:13,197 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 08:20:13,198 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 08:20:13,198 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:20:13,198 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 08:20:13,198 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 08:20:13,198 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 08:20:13,198 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 08:20:13,198 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 08:20:13,222 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 08:20:13,223 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 08:20:13,223 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 08:20:13,223 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 08:20:13,267 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 08:20:13,270 [dag.py:816] [DAG] start
+INFO 2021-12-30 08:20:13,271 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 08:20:13,276 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 08:20:13,295 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:20:13,295 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 08:20:13,296 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 08:20:14,485 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 08:20:15,692 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 08:20:19,232 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 08:20:19,233 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 08:20:19,233 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 08:20:20,892 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 84, in postprocess
+ np_score_list.append(fetch_dict[i])
+KeyError: 0
+ERROR 2021-12-30 08:20:20,896 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+WARNING 2021-12-30 08:31:54,772 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 08:31:54,772 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:31:54,772 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 08:31:54,772 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 08:31:54,773 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 08:31:54,773 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 08:31:54,773 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:31:54,773 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 08:31:54,773 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 08:31:54,773 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 08:31:54,773 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 08:31:54,773 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 08:31:54,773 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 08:31:54,773 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 08:31:54,774 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 08:31:54,774 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 08:31:54,774 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 08:31:54,774 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 08:31:54,774 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:31:54,774 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 08:31:54,775 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 08:31:54,775 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 08:31:54,775 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 08:31:54,775 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 08:31:54,799 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 08:31:54,800 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 08:31:54,800 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 08:31:54,800 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 08:31:54,846 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 08:31:54,851 [dag.py:816] [DAG] start
+INFO 2021-12-30 08:31:54,852 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 08:31:54,857 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 08:31:54,877 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:31:54,877 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 08:31:54,877 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 08:31:56,080 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 08:31:57,306 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 08:31:59,636 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 08:31:59,638 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 08:31:59,638 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 08:32:01,331 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 91, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:32:01,335 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+WARNING 2021-12-30 08:34:21,409 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 08:34:21,409 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:34:21,410 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 08:34:21,410 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 08:34:21,410 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 08:34:21,410 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 08:34:21,410 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:34:21,410 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 08:34:21,410 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 08:34:21,410 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 08:34:21,410 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 08:34:21,411 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 08:34:21,411 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 08:34:21,411 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 08:34:21,411 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 08:34:21,411 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 08:34:21,411 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 08:34:21,411 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 08:34:21,412 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:34:21,412 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 08:34:21,412 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 08:34:21,412 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 08:34:21,412 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 08:34:21,412 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 08:34:21,437 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 08:34:21,438 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 08:34:21,438 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 08:34:21,438 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 08:34:21,482 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 08:34:21,486 [dag.py:816] [DAG] start
+INFO 2021-12-30 08:34:21,487 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 08:34:21,493 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 08:34:21,522 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:34:21,522 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 08:34:21,523 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 08:34:22,753 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 08:34:23,983 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 08:34:27,034 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 08:34:27,036 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 08:34:27,036 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 08:34:28,756 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 96, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:34:28,761 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+WARNING 2021-12-30 08:43:32,266 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 08:43:32,266 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:43:32,267 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 08:43:32,267 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 08:43:32,267 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 08:43:32,267 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 08:43:32,268 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 08:43:32,268 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 08:43:32,268 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 08:43:32,268 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 08:43:32,268 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 08:43:32,269 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 08:43:32,269 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 08:43:32,269 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 08:43:32,269 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 08:43:32,269 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 08:43:32,270 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 08:43:32,270 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 08:43:32,270 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:43:32,271 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 08:43:32,271 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 08:43:32,271 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":18083,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 08:43:32,271 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 08:43:32,272 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 08:43:32,301 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 08:43:32,302 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 08:43:32,302 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 08:43:32,302 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 08:43:32,343 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 08:43:32,348 [dag.py:816] [DAG] start
+INFO 2021-12-30 08:43:32,349 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 08:43:32,355 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 08:43:32,375 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 08:43:32,376 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 08:43:32,376 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 08:43:33,601 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 08:43:34,822 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 08:43:44,530 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 08:43:44,532 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 08:43:44,532 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 08:43:46,213 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 111, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:43:46,218 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
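The request above fails inside `postprocess` because `paddle_serving_app/reader/image_reader.py` builds bbox results with `lod = [fetch_map[fetch_name + '.lod']]`, i.e. it expects the fetch map to carry a `<fetch_name>.lod` entry next to each output tensor. Such LoD entries exist only when the exported inference model ends in an NMS step that emits a variable-length bbox tensor; a model exported with the eight raw `save_infer_model/scale_*.tmp_1` head outputs carries no LoD, so the lookup raises `KeyError`. The sketch below is a minimal defensive `postprocess` for `web_service.py`; the four-argument signature and the `(dict, error_code, error_info)` return convention are assumptions read off the `_run_postprocess` call in the traceback, so verify both against your Paddle Serving version.

```python
# Minimal sketch (assumptions noted above): fail with a readable message
# when the fetch map lacks the '.lod' entries that img_postprocess needs.
def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
    bbox_output = "save_infer_model/scale_7.tmp_1"
    if bbox_output + ".lod" not in fetch_dict:
        # Raw head tensors without LoD: img_postprocess cannot split
        # detections per image, so report the cause instead of letting
        # image_reader.py raise a bare KeyError.
        msg = ("fetch_map has no '%s.lod' entry; re-export the model with "
               "NMS so the bbox output carries LoD" % bbox_output)
        return {"error": msg}, None, ""
    res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict,
                                                        visualize=False))}
    return res_dict, None, ""
```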
+WARNING 2021-12-30 09:08:06,502 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:08:06,503 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:08:06,503 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:08:06,503 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:08:06,504 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:08:06,504 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:08:06,504 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:08:06,504 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:08:06,505 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:08:06,505 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:08:06,505 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:08:06,505 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:08:06,505 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:08:06,506 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:08:06,506 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:08:06,506 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:08:06,506 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 09:08:06,507 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 09:08:06,507 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 09:08:06,508 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 09:08:06,508 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 09:08:06,508 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 09:08:06,508 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 09:08:06,508 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 09:08:06,533 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 09:08:06,534 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 09:08:06,534 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 09:08:06,534 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 09:08:06,572 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 09:08:06,576 [dag.py:816] [DAG] start
+INFO 2021-12-30 09:08:06,577 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 09:08:06,583 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 09:08:06,609 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 09:08:06,610 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 09:08:06,610 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 09:08:07,840 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 09:08:09,064 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 09:08:09,866 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:08:09,868 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:08:09,868 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 09:08:11,524 [operator.py:1000] (log_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: output of postprocess funticon must be dict type, but get
+ERROR 2021-12-30 09:08:11,527 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (log_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: output of postprocess funticon must be dict type, but get
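Note: both messages above mean the return value of postprocess was rejected before packing: the pipeline type-checks it and requires a dict (the message appears truncated where the offending value's type would be printed). A minimal dict-returning sketch, with the Op subclass name and the JSON serialization assumed from the paddle-serving-server pipeline API rather than taken from this log:

    import json
    import numpy as np
    from paddle_serving_server.web_service import Op

    class PPYoloMbv3Op(Op):
        def postprocess(self, input_dicts, fetch_dict, log_id=0):
            # the framework checks this return value: it must be a dict
            res_dict = {name: json.dumps(np.asarray(value).tolist())
                        for name, value in fetch_dict.items()}
            return res_dict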
[... 2021-12-30 09:19:39 restart: startup output identical to the 09:08:06 block above ([CONF] default warnings, operator config, PIPELINE SERVER dump, DAG init, request receipt) ...]
+ERROR 2021-12-30 09:19:46,754 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: name 'res_dict' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 101, in postprocess
+ res_dict[b] = {}
+NameError: name 'res_dict' is not defined
+ERROR 2021-12-30 09:19:46,757 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: name 'res_dict' is not defined
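Note: here web_service.py line 101 assigns res_dict[b] before res_dict exists anywhere in the function. Creating the container first fixes the NameError (the loop bound of 8 below is assumed from the eight fetch outputs in this config, not from the source):

    res_dict = {}        # must exist before any res_dict[b] = ... assignment
    for b in range(8):   # assumed bound: one slot per fetched output
        res_dict[b] = {}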
[... 2021-12-30 09:20:11 restart: identical startup output omitted ...]
+ERROR 2021-12-30 09:20:15,417 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: list assignment index out of range
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 102, in postprocess
+ res_dict[b] = {}
+IndexError: list assignment index out of range
+ERROR 2021-12-30 09:20:15,421 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: list assignment index out of range
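Note: the NameError is gone, but the same line now fails because res_dict was evidently created as a list. Python lists only allow assignment to existing indices, so res_dict[b] = {} on an empty list raises exactly this "list assignment index out of range". Either append, or use a dict, which accepts new keys:

    res_list = []
    res_list.append({})   # grows a list safely; res_list[0] = {} on [] raises IndexError

    res_dict = {}
    res_dict[0] = {}      # a dict accepts arbitrary new keys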
[... 2021-12-30 09:21:19 restart: identical startup output omitted ...]
+ERROR 2021-12-30 09:21:24,326 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: list assignment index out of range
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 103, in postprocess
+ res_dict[b] = {}
+IndexError: list assignment index out of range
+ERROR 2021-12-30 09:21:24,330 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: list assignment index out of range
[... 2021-12-30 09:23:31 restart: identical startup output omitted ...]
+ERROR 2021-12-30 09:23:37,722 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: name 'a' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 101, in postprocess
+ for b in range(a.ndim):
+NameError: name 'a' is not defined
+ERROR 2021-12-30 09:23:37,726 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: name 'a' is not defined
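Note: the loop `for b in range(a.ndim)` references a name `a` that is never bound in postprocess. Binding it to one of the fetched arrays first removes the NameError; the output name below is taken from the fetch_list in this config, but whether it is the tensor the author intended is an assumption:

    import numpy as np
    a = np.asarray(fetch_dict['save_infer_model/scale_0.tmp_1'])
    for b in range(a.ndim):
        pass  # per-dimension handling goes here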
[... 2021-12-30 09:24:02 restart: identical startup output omitted ...]
+ERROR 2021-12-30 09:24:07,046 [operator.py:1000] (log_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: output of postprocess funticon must be dict type, but get
+ERROR 2021-12-30 09:24:07,049 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (log_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: output of postprocess funticon must be dict type, but get
[... 2021-12-30 09:25:54 restart: identical startup output omitted; first request received at 09:25:56 ...]
+INFO 2021-12-30 09:25:58,333 [dag.py:404] (data_id=0 log_id=0) Succ predict
+ERROR 2021-12-30 09:25:58,334 [operator.py:1487] (logid=0) Failed to pack RPC response package:
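Note: prediction now succeeds but the response packer cannot serialize the returned dict (the message ends without naming the offending value). A common cause is returning raw numpy arrays; converting every value to a JSON string before returning is a workaround consistent with this symptom, though the log does not confirm that this was the cause here:

    import json
    import numpy as np
    res_dict = {k: json.dumps(np.asarray(v).tolist()) for k, v in res_dict.items()}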
[... 2021-12-30 09:39:07 restart: identical startup output omitted ...]
+INFO 2021-12-30 09:39:25,749 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:39:25,750 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:39:25,751 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2021-12-30 09:39:27,547 [dag.py:404] (data_id=0 log_id=0) Succ predict
+INFO 2021-12-30 09:40:00,135 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:40:00,136 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:40:00,136 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+ERROR 2021-12-30 09:40:00,249 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:40:00,253 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+INFO 2021-12-30 09:40:48,641 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:40:48,642 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:40:48,642 [dag.py:368] (data_id=2 log_id=0) Succ Generate ID
+ERROR 2021-12-30 09:40:48,711 [operator.py:973] (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:40:48,714 [dag.py:409] (data_id=2 log_id=0) Failed to predict: (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+INFO 2021-12-30 09:40:53,612 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:40:53,613 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:40:53,614 [dag.py:368] (data_id=3 log_id=0) Succ Generate ID
+ERROR 2021-12-30 09:40:53,720 [operator.py:973] (data_id=3 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:40:53,723 [dag.py:409] (data_id=3 log_id=0) Failed to predict: (data_id=3 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 09:42:01,778 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:42:01,779 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:42:01,779 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:42:01,779 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:42:01,779 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:42:01,780 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:42:01,780 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:42:01,780 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:42:01,780 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:42:01,781 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:42:01,781 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:42:01,781 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:42:01,781 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:42:01,781 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:42:01,782 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:42:01,782 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:42:01,782 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 09:42:01,783 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 09:42:01,783 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 09:42:01,784 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 09:42:01,784 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 09:42:01,784 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 09:42:01,784 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 09:42:01,784 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 09:42:01,813 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 09:42:01,814 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 09:42:01,814 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 09:42:01,814 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 09:42:01,853 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 09:42:01,858 [dag.py:816] [DAG] start
+INFO 2021-12-30 09:42:01,859 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 09:42:01,865 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 09:42:01,889 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 09:42:01,889 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 09:42:01,889 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 09:42:03,109 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 09:42:04,298 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 09:42:04,956 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:42:04,958 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:42:04,958 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 09:42:06,612 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 09:42:06,616 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+INFO 2021-12-30 09:42:19,843 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:42:19,844 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:42:19,845 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+ERROR 2021-12-30 09:42:19,953 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 09:42:19,957 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+WARNING 2021-12-30 09:42:23,963 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:42:23,963 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:42:23,963 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:42:23,963 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:42:23,964 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:42:23,964 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:42:23,964 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:42:23,964 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:42:23,965 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:42:23,965 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:42:23,965 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:42:23,965 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:42:23,966 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:42:23,966 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:42:23,966 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:42:23,966 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:42:23,967 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 09:42:23,968 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 09:42:23,968 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 09:42:23,968 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 09:42:23,969 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 09:42:23,969 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 09:42:23,970 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 09:42:23,970 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 09:42:24,000 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 09:42:24,001 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 09:42:24,001 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 09:42:24,001 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 09:42:24,042 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 09:42:24,047 [dag.py:816] [DAG] start
+INFO 2021-12-30 09:42:24,048 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 09:42:24,054 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 09:42:24,072 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 09:42:24,073 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 09:42:24,073 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 09:42:25,280 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 09:42:26,470 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 09:42:26,701 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:42:26,702 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:42:26,702 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2021-12-30 09:42:28,405 [dag.py:404] (data_id=0 log_id=0) Succ predict
+INFO 2021-12-30 09:43:10,878 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:43:10,879 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:43:10,879 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+ERROR 2021-12-30 09:43:10,985 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:43:10,988 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+INFO 2021-12-30 09:43:15,839 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:43:15,840 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:43:15,840 [dag.py:368] (data_id=2 log_id=0) Succ Generate ID
+ERROR 2021-12-30 09:43:15,944 [operator.py:973] (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:43:15,950 [dag.py:409] (data_id=2 log_id=0) Failed to predict: (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 09:44:54,821 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:44:54,822 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:44:54,822 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:44:54,822 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:44:54,822 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:44:54,823 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:44:54,823 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:44:54,823 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:44:54,823 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:44:54,823 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:44:54,824 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:44:54,824 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:44:54,824 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:44:54,824 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:44:54,825 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:44:54,825 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:44:54,825 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 09:44:54,826 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 09:44:54,826 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 09:44:54,826 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 09:44:54,826 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 09:44:54,827 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 09:44:54,827 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 09:44:54,827 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 09:44:54,856 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 09:44:54,856 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 09:44:54,857 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 09:44:54,857 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 09:44:54,898 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 09:44:54,901 [dag.py:816] [DAG] start
+INFO 2021-12-30 09:44:54,902 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 09:44:54,908 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 09:44:54,927 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 09:44:54,927 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 09:44:54,927 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 09:44:56,140 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 09:44:57,521 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 09:44:59,119 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:44:59,121 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:44:59,122 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2021-12-30 09:45:00,979 [dag.py:404] (data_id=0 log_id=0) Succ predict
+INFO 2021-12-30 09:45:05,363 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:45:05,364 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:45:05,364 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+ERROR 2021-12-30 09:45:05,468 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:45:05,472 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 09:46:17,679 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:46:17,680 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:46:17,680 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:46:17,680 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:46:17,680 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:46:17,680 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:46:17,681 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:46:17,681 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:46:17,681 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:46:17,681 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:46:17,682 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:46:17,682 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:46:17,682 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:46:17,682 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:46:17,682 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:46:17,683 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:46:17,683 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 09:46:17,684 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 09:46:17,684 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 09:46:17,684 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 09:46:17,684 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 09:46:17,685 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 09:46:17,685 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 09:46:17,685 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 09:46:17,714 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 09:46:17,715 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 09:46:17,715 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 09:46:17,715 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 09:46:17,756 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 09:46:17,760 [dag.py:816] [DAG] start
+INFO 2021-12-30 09:46:17,761 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 09:46:17,766 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 09:46:17,795 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 09:46:17,796 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 09:46:17,796 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 09:46:19,035 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 09:46:20,237 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 09:46:21,068 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:46:21,070 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:46:21,071 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2021-12-30 09:46:22,764 [dag.py:404] (data_id=0 log_id=0) Succ predict
+INFO 2021-12-30 09:46:25,462 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 09:46:25,463 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 09:46:25,463 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+ERROR 2021-12-30 09:46:25,584 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:46:25,588 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+INFO 2021-12-30 11:05:40,431 [pipeline_server.py:51] (log_id=0) inference request name:recognition self.name:ppyolo_mbv3
+ERROR 2021-12-30 11:05:40,432 [pipeline_server.py:55] (log_id=0) name dismatch error. request.name:recognition,server.name=ppyolo_mbv3
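The `name dismatch error` above is a client-side addressing problem, not a model problem: the request carried the name `recognition` while this server's only op is `ppyolo_mbv3`. With the pipeline HTTP service, the op name is the first path segment of the URL, so requests must target `/ppyolo_mbv3/prediction` on the configured `http_port` (2009 in the config dump above). A hedged example of a matching request (host and image file name are placeholders):

```python
import base64
import json

import requests

# The path segment must equal the op name from the server config;
# requesting /recognition/prediction reproduces the "name dismatch" error.
url = "http://127.0.0.1:2009/ppyolo_mbv3/prediction"

with open("test.jpg", "rb") as f:  # hypothetical input image
    image = base64.b64encode(f.read()).decode("utf8")

data = {"key": ["image"], "value": [image]}
resp = requests.post(url=url, data=json.dumps(data))
print(resp.json())
```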
+INFO 2021-12-30 11:05:49,163 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:05:49,164 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:05:49,164 [dag.py:368] (data_id=2 log_id=0) Succ Generate ID
+ERROR 2021-12-30 11:05:49,271 [operator.py:973] (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 11:05:49,274 [dag.py:409] (data_id=2 log_id=0) Failed to predict: (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 11:05:54,559 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 11:05:54,560 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:05:54,560 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 11:05:54,560 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 11:05:54,560 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 11:05:54,560 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 11:05:54,561 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:05:54,561 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 11:05:54,561 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 11:05:54,561 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 11:05:54,562 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 11:05:54,562 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 11:05:54,562 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 11:05:54,562 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 11:05:54,563 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 11:05:54,563 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 11:05:54,563 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 11:05:54,564 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 11:05:54,564 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:05:54,565 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 11:05:54,565 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 11:05:54,566 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 11:05:54,566 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 11:05:54,566 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 11:05:54,596 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 11:05:54,597 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 11:05:54,597 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 11:05:54,597 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 11:05:54,636 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 11:05:54,641 [dag.py:816] [DAG] start
+INFO 2021-12-30 11:05:54,642 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 11:05:54,648 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 11:05:54,671 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:05:54,671 [operator.py:1167] Init cuda env in process 0
+INFO 2021-12-30 11:05:54,672 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 11:05:55,892 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 11:05:56,096 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:05:56,097 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:05:56,098 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2021-12-30 11:05:57,079 [operator.py:1178] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 11:05:58,729 [dag.py:404] (data_id=0 log_id=0) Succ predict
+INFO 2021-12-30 11:15:50,113 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:15:50,115 [operator.py:1426] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:15:50,115 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+ERROR 2021-12-30 11:15:50,225 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 11:15:50,229 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 11:24:24,428 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 11:24:24,428 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:24:24,428 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 11:24:24,429 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 11:24:24,429 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 11:24:24,429 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 11:24:24,429 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:24:24,429 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 11:24:24,430 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 11:24:24,430 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 11:24:24,430 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 11:24:24,430 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 11:24:24,430 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 11:24:24,431 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 11:24:24,431 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 11:24:24,431 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 11:24:24,431 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 11:24:24,432 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 11:24:24,432 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:24:24,432 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 11:24:24,433 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 11:24:24,433 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 11:24:24,433 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 11:24:24,433 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 11:24:24,462 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 11:24:24,463 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 11:24:24,463 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 11:24:24,463 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 11:24:24,503 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 11:24:24,508 [dag.py:816] [DAG] start
+INFO 2021-12-30 11:24:24,508 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 11:24:24,513 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 11:24:24,533 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:24:24,534 [operator.py:1170] Init cuda env in process 0
+INFO 2021-12-30 11:24:24,534 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 11:24:25,753 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 11:24:26,946 [operator.py:1181] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 11:24:28,169 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:24:28,171 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:24:28,171 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2021-12-30 11:24:30,117 [dag.py:404] (data_id=0 log_id=0) Succ predict
+INFO 2021-12-30 11:24:39,879 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:24:39,880 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:24:39,880 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+ERROR 2021-12-30 11:24:40,005 [operator.py:976] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 972, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 11:24:40,009 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 11:37:49,610 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 11:37:49,611 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:37:49,611 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 11:37:49,611 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 11:37:49,611 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 11:37:49,612 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 11:37:49,612 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:37:49,612 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 11:37:49,612 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 11:37:49,613 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 11:37:49,613 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 11:37:49,613 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 11:37:49,613 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 11:37:49,613 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 11:37:49,614 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 11:37:49,614 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 11:37:49,614 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 11:37:49,615 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 11:37:49,615 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:37:49,616 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 11:37:49,616 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 11:37:49,617 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 11:37:49,617 [pipeline_server.py:212] -------------------------------------------
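The JSON dump above is the effective configuration after the pipeline server merged in every default flagged by a [CONF] warning. Reading the warnings and the dump together, the config.yml behind this run presumably set only the keys below; this is a reconstruction from the logged values (the actual file is not part of the log), so treat it as an approximation:

    # config.yml (reconstructed; every key not listed here fell back to a
    # logged [CONF] default)
    worker_num: 20
    http_port: 2009
    rpc_port: 9999
    dag:
        is_thread_op: false
        tracer:
            interval_s: 30
    op:
        ppyolo_mbv3:
            concurrency: 1
            local_service_conf:
                client_type: local_predictor
                device_type: 2        # paired with use_trt:True in the launch line
                devices: "0"
                model_config: serving_server/
                fetch_list:
                    - save_infer_model/scale_0.tmp_1
                    - save_infer_model/scale_1.tmp_1
                    - save_infer_model/scale_2.tmp_1
                    - save_infer_model/scale_3.tmp_1
                    - save_infer_model/scale_4.tmp_1
                    - save_infer_model/scale_5.tmp_1
                    - save_infer_model/scale_6.tmp_1
                    - save_infer_model/scale_7.tmp_1

The same block is re-printed verbatim at every restart below; only the timestamps and the outcome of each run differ.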
+INFO 2021-12-30 11:37:49,617 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 11:37:49,647 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 11:37:49,648 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 11:37:49,648 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 11:37:49,648 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 11:37:49,689 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 11:37:49,693 [dag.py:816] [DAG] start
+INFO 2021-12-30 11:37:49,693 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 11:37:49,699 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 11:37:49,728 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:37:49,728 [operator.py:1170] Init cuda env in process 0
+INFO 2021-12-30 11:37:49,729 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 11:37:50,942 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 11:37:52,133 [operator.py:1181] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 11:37:52,375 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:37:52,377 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:37:52,377 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+ERROR 2021-12-30 11:37:54,033 [operator.py:976] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 972, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 92, in postprocess
+ np_boxes, np_boxes_num = self.postprocess(np_score_list, np_boxes_list)
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 11:37:54,039 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
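This first failure is an ordinary Python signature clash, readable straight off the traceback: the framework's `_run_postprocess` (operator.py:972) invokes the user override with three arguments, `self.postprocess(input_dicts, fetch_dict, logid_dict.get(data_id))`, while line 92 of web_service.py calls `self.postprocess(np_score_list, np_boxes_list)` — re-entering the Op method itself with only two arguments instead of calling the detection post-processing helper, hence `missing 1 required positional argument: 'log_id'`. A minimal sketch of the repaired override; the helper name `PicoDetPostProcess` is taken from the 11:40:01 traceback further down, and the tensor grouping and return convention are assumptions to check against the installed serving release:

    # web_service.py override (sketch)
    def postprocess(self, input_dicts, fetch_dict, log_id):
        # Group the eight fetched tensors into score and box heads
        # (4 + 4 key layout assumed from the fetch_list above).
        keys = sorted(fetch_dict.keys())
        np_score_list = [fetch_dict[k] for k in keys[:4]]
        np_boxes_list = [fetch_dict[k] for k in keys[4:]]
        # The helper must live under a name *other than* `postprocess`;
        # `self.postprocess(...)` is exactly the call that recursed here.
        np_boxes, np_boxes_num = self.post_process(np_score_list, np_boxes_list)
        # Values stringified because pipeline channels serialize dict values.
        result = {"boxes": str(np_boxes), "boxes_num": str(np_boxes_num)}
        # Contemporaneous pipeline releases expect a (dict, prod_errcode,
        # prod_errinfo) triple back from postprocess.
        return result, None, ""

One detail worth noting: the 11:40:01 traceback below reports the same TypeError but shows line 92 as `self.post_process = PicoDetPostProcess(`. Python renders traceback source from the file currently on disk, so web_service.py had already been edited while the not-yet-restarted worker was still executing the old bytecode; after the 11:40:05 restart the same requests complete with `Succ predict`.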
+INFO 2021-12-30 11:37:55,868 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:37:55,869 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:37:55,869 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+ERROR 2021-12-30 11:37:55,960 [operator.py:976] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 972, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 92, in postprocess
+ np_boxes, np_boxes_num = self.postprocess(np_score_list, np_boxes_list)
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 11:37:55,963 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+WARNING 2021-12-30 11:38:09,257 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 11:38:09,258 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:38:09,258 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 11:38:09,258 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 11:38:09,258 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 11:38:09,259 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 11:38:09,259 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:38:09,259 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 11:38:09,259 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 11:38:09,260 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 11:38:09,260 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 11:38:09,260 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 11:38:09,260 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 11:38:09,260 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 11:38:09,261 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 11:38:09,261 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 11:38:09,261 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 11:38:09,262 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 11:38:09,262 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:38:09,262 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 11:38:09,262 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 11:38:09,263 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 11:38:09,263 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 11:38:09,263 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 11:38:09,291 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 11:38:09,292 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 11:38:09,292 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 11:38:09,292 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 11:38:09,331 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 11:38:09,335 [dag.py:816] [DAG] start
+INFO 2021-12-30 11:38:09,336 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 11:38:09,342 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 11:38:09,360 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:38:09,361 [operator.py:1170] Init cuda env in process 0
+INFO 2021-12-30 11:38:09,361 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 11:38:10,578 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 11:38:10,613 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:38:10,615 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:38:10,615 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2021-12-30 11:38:11,767 [operator.py:1181] [ppyolo_mbv3|0] Succ init
+ERROR 2021-12-30 11:38:13,407 [operator.py:976] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 972, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 92, in postprocess
+ np_boxes, np_boxes_num = self.postprocess(np_score_list, np_boxes_list)
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 11:38:13,411 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+INFO 2021-12-30 11:40:01,272 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:40:01,273 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:40:01,274 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+ERROR 2021-12-30 11:40:01,379 [operator.py:976] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 972, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 92, in postprocess
+ self.post_process = PicoDetPostProcess(
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 11:40:01,383 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+WARNING 2021-12-30 11:40:05,557 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 11:40:05,557 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:40:05,558 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 11:40:05,558 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 11:40:05,558 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 11:40:05,558 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 11:40:05,558 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:40:05,559 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 11:40:05,559 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 11:40:05,559 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 11:40:05,559 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 11:40:05,559 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 11:40:05,560 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 11:40:05,560 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 11:40:05,560 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 11:40:05,560 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 11:40:05,560 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 11:40:05,561 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 11:40:05,561 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:40:05,562 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 11:40:05,562 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 11:40:05,562 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 11:40:05,562 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 11:40:05,562 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 11:40:05,592 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 11:40:05,593 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 11:40:05,593 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 11:40:05,593 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 11:40:05,633 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 11:40:05,638 [dag.py:816] [DAG] start
+INFO 2021-12-30 11:40:05,639 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 11:40:05,646 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 11:40:05,665 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:40:05,665 [operator.py:1170] Init cuda env in process 0
+INFO 2021-12-30 11:40:05,665 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 11:40:06,892 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 11:40:08,120 [operator.py:1181] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 11:40:08,937 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:40:08,939 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:40:08,939 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2021-12-30 11:40:10,596 [dag.py:404] (data_id=0 log_id=0) Succ predict
+INFO 2021-12-30 11:40:13,169 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:40:13,169 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:40:13,170 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+INFO 2021-12-30 11:40:13,272 [dag.py:404] (data_id=1 log_id=0) Succ predict
+INFO 2021-12-30 11:40:59,103 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:40:59,104 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:40:59,105 [dag.py:368] (data_id=2 log_id=0) Succ Generate ID
+INFO 2021-12-30 11:40:59,221 [dag.py:404] (data_id=2 log_id=0) Succ predict
+WARNING 2021-12-30 11:41:26,917 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 11:41:26,918 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:41:26,918 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 11:41:26,918 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 11:41:26,918 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 11:41:26,918 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 11:41:26,919 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:41:26,919 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 11:41:26,919 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 11:41:26,919 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 11:41:26,919 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 11:41:26,920 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 11:41:26,920 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 11:41:26,920 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 11:41:26,920 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 11:41:26,920 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 11:41:26,921 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2021-12-30 11:41:26,922 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2021-12-30 11:41:26,922 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:41:26,922 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2021-12-30 11:41:26,923 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2021-12-30 11:41:26,923 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2021-12-30 11:41:26,924 [pipeline_server.py:212] -------------------------------------------
+INFO 2021-12-30 11:41:26,924 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2021-12-30 11:41:26,953 [dag.py:493] [DAG] Succ init
+INFO 2021-12-30 11:41:26,954 [dag.py:651] ================= USED OP =================
+INFO 2021-12-30 11:41:26,954 [dag.py:654] ppyolo_mbv3
+INFO 2021-12-30 11:41:26,954 [dag.py:655] -------------------------------------------
+INFO 2021-12-30 11:41:26,993 [dag.py:784] [DAG] Succ build DAG
+INFO 2021-12-30 11:41:26,997 [dag.py:816] [DAG] start
+INFO 2021-12-30 11:41:26,997 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2021-12-30 11:41:27,003 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2021-12-30 11:41:27,026 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2021-12-30 11:41:27,027 [operator.py:1170] Init cuda env in process 0
+INFO 2021-12-30 11:41:27,027 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2021-12-30 11:41:28,253 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2021-12-30 11:41:28,845 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:41:28,846 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:41:28,847 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2021-12-30 11:41:29,451 [operator.py:1181] [ppyolo_mbv3|0] Succ init
+INFO 2021-12-30 11:41:31,104 [dag.py:404] (data_id=0 log_id=0) Succ predict
+INFO 2021-12-30 11:41:33,109 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2021-12-30 11:41:33,110 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2021-12-30 11:41:33,110 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+INFO 2021-12-30 11:41:33,209 [dag.py:404] (data_id=1 log_id=0) Succ predict
+WARNING 2022-02-14 09:24:33,065 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-14 09:24:33,072 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-14 09:24:33,072 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-14 09:24:33,072 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-14 09:24:33,072 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-14 09:24:33,072 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-14 09:24:33,074 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-14 09:24:33,074 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-14 09:24:33,074 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-14 09:24:33,074 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-14 09:24:33,074 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-14 09:24:33,075 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-14 09:24:33,075 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2022-02-14 09:24:33,075 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2022-02-14 09:24:33,075 [pipeline_server.py:212] -------------------------------------------
+INFO 2022-02-14 09:24:33,075 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2022-02-14 09:24:33,096 [dag.py:493] [DAG] Succ init
+INFO 2022-02-14 09:24:33,096 [dag.py:651] ================= USED OP =================
+INFO 2022-02-14 09:24:33,097 [dag.py:654] ppyolo_mbv3
+INFO 2022-02-14 09:24:33,097 [dag.py:655] -------------------------------------------
+INFO 2022-02-14 09:24:33,137 [dag.py:784] [DAG] Succ build DAG
+INFO 2022-02-14 09:24:33,141 [dag.py:816] [DAG] start
+INFO 2022-02-14 09:24:33,142 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-14 09:24:33,147 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2022-02-14 09:24:33,174 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-14 09:24:33,175 [operator.py:1170] Init cuda env in process 0
+INFO 2022-02-14 09:24:33,175 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-14 09:24:34,409 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+CRITICAL 2022-02-14 09:24:35,602 [operator.py:1179] [ppyolo_mbv3|0] failed to init op: [Errno 2] No such file or directory: 'label_list.txt'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1174, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1363, in _initialize
+ self.init_op()
+ File "web_service.py", line 30, in init_op
+ self.img_postprocess = RCNNPostprocess("label_list.txt", "output")
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 288, in __init__
+ with open(label_file) as fin:
+FileNotFoundError: [Errno 2] No such file or directory: 'label_list.txt'
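The 2022-02-14 runs fail earlier, in `init_op`: `RCNNPostprocess("label_list.txt", "output")` opens the label file relative to the process's working directory, so initialization dies whenever the server is started from a directory that does not actually contain label_list.txt (or the file was never exported next to web_service.py). A sketch of a more defensive init_op; the constructor call is quoted from the traceback, the path handling around it is an assumption:

    # web_service.py (sketch)
    import os
    from paddle_serving_app.reader import RCNNPostprocess

    def init_op(self):
        # Resolve label_list.txt next to web_service.py instead of trusting
        # the launch directory, and fail with an explicit hint if it is absent.
        here = os.path.dirname(os.path.abspath(__file__))
        label_path = os.path.join(here, "label_list.txt")
        if not os.path.isfile(label_path):
            raise FileNotFoundError(
                f"{label_path} not found - copy the exported label list "
                "next to web_service.py before launching")
        self.img_postprocess = RCNNPostprocess(label_path, "output")

Also visible below: the request logged at 09:24:57 only gets as far as `Succ Generate ID` and is never answered, because the op worker had already died during init and nothing was left to serve it. Once the file is in place, the 09:26:03 restart logs `Succ init` followed by `Succ predict`.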
+WARNING 2022-02-14 09:24:42,704 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-14 09:24:42,705 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-14 09:24:42,705 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-14 09:24:42,705 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-14 09:24:42,705 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-14 09:24:42,706 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-14 09:24:42,706 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-14 09:24:42,706 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-14 09:24:42,706 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-14 09:24:42,707 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-14 09:24:42,707 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-14 09:24:42,707 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-14 09:24:42,707 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-14 09:24:42,708 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-14 09:24:42,708 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-14 09:24:42,708 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-14 09:24:42,708 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-14 09:24:42,709 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-14 09:24:42,709 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-14 09:24:42,710 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-14 09:24:42,710 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2022-02-14 09:24:42,710 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2022-02-14 09:24:42,710 [pipeline_server.py:212] -------------------------------------------
+INFO 2022-02-14 09:24:42,710 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2022-02-14 09:24:42,741 [dag.py:493] [DAG] Succ init
+INFO 2022-02-14 09:24:42,741 [dag.py:651] ================= USED OP =================
+INFO 2022-02-14 09:24:42,741 [dag.py:654] ppyolo_mbv3
+INFO 2022-02-14 09:24:42,742 [dag.py:655] -------------------------------------------
+INFO 2022-02-14 09:24:42,783 [dag.py:784] [DAG] Succ build DAG
+INFO 2022-02-14 09:24:42,789 [dag.py:816] [DAG] start
+INFO 2022-02-14 09:24:42,790 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-14 09:24:42,796 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2022-02-14 09:24:42,815 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-14 09:24:42,815 [operator.py:1170] Init cuda env in process 0
+INFO 2022-02-14 09:24:42,815 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-14 09:24:44,053 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+CRITICAL 2022-02-14 09:24:45,275 [operator.py:1179] [ppyolo_mbv3|0] failed to init op: [Errno 2] No such file or directory: 'label_list.txt'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1174, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1363, in _initialize
+ self.init_op()
+ File "web_service.py", line 30, in init_op
+ self.img_postprocess = RCNNPostprocess("label_list.txt", "output")
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 288, in __init__
+ with open(label_file) as fin:
+FileNotFoundError: [Errno 2] No such file or directory: 'label_list.txt'
+INFO 2022-02-14 09:24:57,166 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2022-02-14 09:24:57,168 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2022-02-14 09:24:57,169 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+WARNING 2022-02-14 09:26:03,671 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-14 09:26:03,671 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-14 09:26:03,671 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-14 09:26:03,672 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-14 09:26:03,672 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-14 09:26:03,672 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-14 09:26:03,672 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-14 09:26:03,673 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-14 09:26:03,673 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-14 09:26:03,673 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-14 09:26:03,673 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-14 09:26:03,674 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-14 09:26:03,674 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-14 09:26:03,674 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-14 09:26:03,674 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-14 09:26:03,674 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-14 09:26:03,675 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-14 09:26:03,676 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-14 09:26:03,676 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-14 09:26:03,676 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-14 09:26:03,676 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2022-02-14 09:26:03,677 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2022-02-14 09:26:03,677 [pipeline_server.py:212] -------------------------------------------
+INFO 2022-02-14 09:26:03,677 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2022-02-14 09:26:03,706 [dag.py:493] [DAG] Succ init
+INFO 2022-02-14 09:26:03,707 [dag.py:651] ================= USED OP =================
+INFO 2022-02-14 09:26:03,707 [dag.py:654] ppyolo_mbv3
+INFO 2022-02-14 09:26:03,707 [dag.py:655] -------------------------------------------
+INFO 2022-02-14 09:26:03,747 [dag.py:784] [DAG] Succ build DAG
+INFO 2022-02-14 09:26:03,752 [dag.py:816] [DAG] start
+INFO 2022-02-14 09:26:03,753 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-14 09:26:03,761 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2022-02-14 09:26:03,776 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-14 09:26:03,777 [operator.py:1170] Init cuda env in process 0
+INFO 2022-02-14 09:26:03,777 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-14 09:26:04,993 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-14 09:26:05,574 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2022-02-14 09:26:05,576 [operator.py:1429] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2022-02-14 09:26:05,577 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-14 09:26:06,239 [operator.py:1181] [ppyolo_mbv3|0] Succ init
+INFO 2022-02-14 09:26:07,900 [dag.py:404] (data_id=0 log_id=0) Succ predict
+WARNING 2022-02-16 16:56:51,836 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-16 16:56:51,836 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 16:56:51,836 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-16 16:56:51,836 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-16 16:56:51,836 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-16 16:56:51,846 [operator.py:181] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-16 16:56:51,847 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-16 16:56:51,847 [operator.py:285] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-16 16:56:51,847 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-16 16:56:51,847 [pipeline_server.py:218]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "channel_recv_frist_arrive":false
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2022-02-16 16:56:51,847 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-16 16:56:51,847 [operator.py:308] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2022-02-16 16:56:51,869 [dag.py:496] [DAG] Succ init
+INFO 2022-02-16 16:56:51,869 [dag.py:659] ================= USED OP =================
+INFO 2022-02-16 16:56:51,869 [dag.py:662] ppyolo_mbv3
+INFO 2022-02-16 16:56:51,870 [dag.py:663] -------------------------------------------
+INFO 2022-02-16 16:56:51,870 [dag.py:680] ================== DAG ====================
+INFO 2022-02-16 16:56:51,870 [dag.py:682] (VIEW 0)
+INFO 2022-02-16 16:56:51,870 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-16 16:56:51,870 [dag.py:686] - ppyolo_mbv3
+INFO 2022-02-16 16:56:51,870 [dag.py:682] (VIEW 1)
+INFO 2022-02-16 16:56:51,870 [dag.py:684] [ppyolo_mbv3]
+INFO 2022-02-16 16:56:51,870 [dag.py:687] -------------------------------------------
+INFO 2022-02-16 16:56:51,885 [dag.py:730] op:ppyolo_mbv3 add input channel.
+INFO 2022-02-16 16:56:51,895 [dag.py:759] last op:ppyolo_mbv3 add output channel
+INFO 2022-02-16 16:56:51,895 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-16 16:56:51,899 [dag.py:832] [DAG] start
+INFO 2022-02-16 16:56:51,899 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-16 16:56:51,905 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-16 16:56:51,911 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-16 16:56:51,912 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-16 16:56:51,912 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-16 16:56:52,885 [local_predict.py:153] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-16 16:56:55,000 [operator.py:1317] [ppyolo_mbv3|0] Succ init
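
(Editor's note on the startup dump above: device_type:2 together with use_trt:True means the local predictor runs on GPU 0 with TensorRT enabled, requests enter through rpc_port 9999 or http_port 2009, and fetch_names lists the eight output tensors of the exported detector. A minimal RPC client sketch against this server, assuming Paddle Serving's standard PipelineClient API, an assumed feed key "image", and a placeholder image path:

    import base64

    from paddle_serving_server.pipeline import PipelineClient

    client = PipelineClient()
    client.connect(["127.0.0.1:9999"])  # rpc_port from the config dump above

    with open("test.jpg", "rb") as f:  # placeholder image path
        image = base64.b64encode(f.read()).decode("utf8")

    # "image" is the assumed feed key handled by this pipeline's preprocess;
    # "bbox_result" matches the key built in web_service.py's postprocess.
    ret = client.predict(feed_dict={"image": image}, fetch=["bbox_result"])
    print(ret)

)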
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-16 17:05:23,154 [operator.py:181] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-16 17:05:23,154 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-16 17:05:23,154 [operator.py:285] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-16 17:05:23,154 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-16 17:05:23,155 [pipeline_server.py:218]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "channel_recv_frist_arrive":false
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2022-02-16 17:05:23,155 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-16 17:05:23,155 [operator.py:308] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2022-02-16 17:05:23,176 [dag.py:496] [DAG] Succ init
+INFO 2022-02-16 17:05:23,177 [dag.py:659] ================= USED OP =================
+INFO 2022-02-16 17:05:23,177 [dag.py:662] ppyolo_mbv3
+INFO 2022-02-16 17:05:23,177 [dag.py:663] -------------------------------------------
+INFO 2022-02-16 17:05:23,177 [dag.py:680] ================== DAG ====================
+INFO 2022-02-16 17:05:23,177 [dag.py:682] (VIEW 0)
+INFO 2022-02-16 17:05:23,177 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-16 17:05:23,177 [dag.py:686] - ppyolo_mbv3
+INFO 2022-02-16 17:05:23,177 [dag.py:682] (VIEW 1)
+INFO 2022-02-16 17:05:23,177 [dag.py:684] [ppyolo_mbv3]
+INFO 2022-02-16 17:05:23,177 [dag.py:687] -------------------------------------------
+INFO 2022-02-16 17:05:23,192 [dag.py:730] op:ppyolo_mbv3 add input channel.
+INFO 2022-02-16 17:05:23,202 [dag.py:759] last op:ppyolo_mbv3 add output channel
+INFO 2022-02-16 17:05:23,202 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-16 17:05:23,205 [dag.py:832] [DAG] start
+INFO 2022-02-16 17:05:23,206 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-16 17:05:23,211 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-16 17:05:23,229 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-16 17:05:23,229 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-16 17:05:23,229 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-16 17:05:24,167 [local_predict.py:153] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-16 17:05:26,236 [operator.py:1317] [ppyolo_mbv3|0] Succ init
+INFO 2022-02-16 17:05:47,771 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645002347.7711458
+INFO 2022-02-16 17:05:47,772 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645002347.7724555
+INFO 2022-02-16 17:05:47,772 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:05:50,421 [dag.py:405] (data_id=0 log_id=0) Succ predict
+INFO 2022-02-16 17:05:50,447 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645002350.4476814
+INFO 2022-02-16 17:05:50,448 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645002350.448209
+INFO 2022-02-16 17:05:50,448 [dag.py:369] (data_id=1 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:05:50,527 [dag.py:405] (data_id=1 log_id=0) Succ predict
+INFO 2022-02-16 17:07:00,293 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645002420.2930179
+INFO 2022-02-16 17:07:00,293 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645002420.2936485
+INFO 2022-02-16 17:07:00,293 [dag.py:369] (data_id=2 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:07:00,362 [dag.py:405] (data_id=2 log_id=0) Succ predict
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-16 17:09:22,057 [operator.py:181] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-16 17:09:22,058 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-16 17:09:22,058 [operator.py:285] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['save_infer_model/scale_0.tmp_1', 'save_infer_model/scale_1.tmp_1', 'save_infer_model/scale_2.tmp_1', 'save_infer_model/scale_3.tmp_1', 'save_infer_model/scale_4.tmp_1', 'save_infer_model/scale_5.tmp_1', 'save_infer_model/scale_6.tmp_1', 'save_infer_model/scale_7.tmp_1']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-16 17:09:22,058 [pipeline_server.py:215] ============= PIPELINE SERVER =============
+INFO 2022-02-16 17:09:22,058 [pipeline_server.py:218]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0,
+ "channel_recv_frist_arrive":false
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "save_infer_model/scale_0.tmp_1",
+ "save_infer_model/scale_1.tmp_1",
+ "save_infer_model/scale_2.tmp_1",
+ "save_infer_model/scale_3.tmp_1",
+ "save_infer_model/scale_4.tmp_1",
+ "save_infer_model/scale_5.tmp_1",
+ "save_infer_model/scale_6.tmp_1",
+ "save_infer_model/scale_7.tmp_1"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2022-02-16 17:09:22,058 [pipeline_server.py:223] -------------------------------------------
+INFO 2022-02-16 17:09:22,058 [operator.py:308] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2022-02-16 17:09:22,079 [dag.py:496] [DAG] Succ init
+INFO 2022-02-16 17:09:22,080 [dag.py:659] ================= USED OP =================
+INFO 2022-02-16 17:09:22,080 [dag.py:662] ppyolo_mbv3
+INFO 2022-02-16 17:09:22,080 [dag.py:663] -------------------------------------------
+INFO 2022-02-16 17:09:22,080 [dag.py:680] ================== DAG ====================
+INFO 2022-02-16 17:09:22,080 [dag.py:682] (VIEW 0)
+INFO 2022-02-16 17:09:22,080 [dag.py:684] [@DAGExecutor]
+INFO 2022-02-16 17:09:22,080 [dag.py:686] - ppyolo_mbv3
+INFO 2022-02-16 17:09:22,081 [dag.py:682] (VIEW 1)
+INFO 2022-02-16 17:09:22,081 [dag.py:684] [ppyolo_mbv3]
+INFO 2022-02-16 17:09:22,081 [dag.py:687] -------------------------------------------
+INFO 2022-02-16 17:09:22,095 [dag.py:730] op:ppyolo_mbv3 add input channel.
+INFO 2022-02-16 17:09:22,105 [dag.py:759] last op:ppyolo_mbv3 add output channel
+INFO 2022-02-16 17:09:22,105 [dag.py:800] [DAG] Succ build DAG
+INFO 2022-02-16 17:09:22,108 [dag.py:832] [DAG] start
+INFO 2022-02-16 17:09:22,109 [dag.py:182] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-16 17:09:22,114 [pipeline_server.py:51] [PipelineServicer] succ init
+INFO 2022-02-16 17:09:22,119 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-16 17:09:22,120 [operator.py:1306] Init cuda env in process 0
+INFO 2022-02-16 17:09:22,120 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-16 17:09:23,058 [local_predict.py:153] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-16 17:09:24,980 [operator.py:1317] [ppyolo_mbv3|0] Succ init
+INFO 2022-02-16 17:10:00,414 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645002600.4143305
+INFO 2022-02-16 17:10:00,415 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645002600.4155881
+INFO 2022-02-16 17:10:00,416 [dag.py:369] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:10:02,981 [dag.py:405] (data_id=0 log_id=0) Succ predict
+INFO 2022-02-16 17:14:52,096 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645002892.0961268
+INFO 2022-02-16 17:14:52,096 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645002892.096749
+INFO 2022-02-16 17:14:52,097 [dag.py:369] (data_id=1 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:14:52,172 [dag.py:405] (data_id=1 log_id=0) Succ predict
+INFO 2022-02-16 17:15:06,391 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645002906.390965
+INFO 2022-02-16 17:15:06,391 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645002906.3916032
+INFO 2022-02-16 17:15:06,391 [dag.py:369] (data_id=2 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:15:06,459 [dag.py:405] (data_id=2 log_id=0) Succ predict
+INFO 2022-02-16 17:21:25,074 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645003285.0745604
+INFO 2022-02-16 17:21:25,075 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645003285.075034
+INFO 2022-02-16 17:21:25,075 [dag.py:369] (data_id=3 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:21:25,141 [dag.py:405] (data_id=3 log_id=0) Succ predict
+INFO 2022-02-16 17:21:53,963 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645003313.9639173
+INFO 2022-02-16 17:21:53,964 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645003313.9646337
+INFO 2022-02-16 17:21:53,964 [dag.py:369] (data_id=4 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:21:54,033 [dag.py:405] (data_id=4 log_id=0) Succ predict
+INFO 2022-02-16 17:22:47,816 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645003367.8165774
+INFO 2022-02-16 17:22:47,817 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645003367.8172152
+INFO 2022-02-16 17:22:47,817 [dag.py:369] (data_id=5 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:22:47,886 [dag.py:405] (data_id=5 log_id=0) Succ predict
+INFO 2022-02-16 17:23:03,882 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645003383.882905
+INFO 2022-02-16 17:23:03,883 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645003383.8835778
+INFO 2022-02-16 17:23:03,883 [dag.py:369] (data_id=6 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:23:03,951 [dag.py:405] (data_id=6 log_id=0) Succ predict
+INFO 2022-02-16 17:23:30,593 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645003410.5935636
+INFO 2022-02-16 17:23:30,594 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645003410.5942206
+INFO 2022-02-16 17:23:30,594 [dag.py:369] (data_id=7 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:23:30,662 [dag.py:405] (data_id=7 log_id=0) Succ predict
+INFO 2022-02-16 17:24:12,780 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645003452.7805502
+INFO 2022-02-16 17:24:12,781 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645003452.7814457
+INFO 2022-02-16 17:24:12,781 [dag.py:369] (data_id=8 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:24:12,853 [dag.py:405] (data_id=8 log_id=0) Succ predict
+INFO 2022-02-16 17:24:23,797 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645003463.7971623
+INFO 2022-02-16 17:24:23,797 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645003463.7978337
+INFO 2022-02-16 17:24:23,798 [dag.py:369] (data_id=9 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:24:23,867 [dag.py:405] (data_id=9 log_id=0) Succ predict
+INFO 2022-02-16 17:24:43,980 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645003483.9801416
+INFO 2022-02-16 17:24:43,980 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645003483.9806335
+INFO 2022-02-16 17:24:43,980 [dag.py:369] (data_id=10 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:24:44,049 [dag.py:405] (data_id=10 log_id=0) Succ predict
+INFO 2022-02-16 17:24:54,159 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645003494.15903
+INFO 2022-02-16 17:24:54,159 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645003494.1595104
+INFO 2022-02-16 17:24:54,159 [dag.py:369] (data_id=11 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:24:54,226 [dag.py:405] (data_id=11 log_id=0) Succ predict
+INFO 2022-02-16 17:25:12,386 [pipeline_server.py:56] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3 time:1645003512.3861694
+INFO 2022-02-16 17:25:12,386 [operator.py:1723] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction, time:1645003512.3868122
+INFO 2022-02-16 17:25:12,387 [dag.py:369] (data_id=12 log_id=0) Succ Generate ID
+INFO 2022-02-16 17:25:12,455 [dag.py:405] (data_id=12 log_id=0) Succ predict
+WARNING 2022-02-22 13:07:32,971 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-22 13:07:32,971 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:07:32,971 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-22 13:07:32,971 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-22 13:07:32,973 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['transpose_1.tmp_0', 'transpose_2.tmp_0', 'transpose_3.tmp_0', 'transpose_4.tmp_0', 'transpose_5.tmp_0', 'transpose_6.tmp_0', 'transpose_7.tmp_0', 'transpose_0.tmp_0'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-22 13:07:32,973 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['transpose_1.tmp_0', 'transpose_2.tmp_0', 'transpose_3.tmp_0', 'transpose_4.tmp_0', 'transpose_5.tmp_0', 'transpose_6.tmp_0', 'transpose_7.tmp_0', 'transpose_0.tmp_0'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:07:32,973 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['transpose_1.tmp_0', 'transpose_2.tmp_0', 'transpose_3.tmp_0', 'transpose_4.tmp_0', 'transpose_5.tmp_0', 'transpose_6.tmp_0', 'transpose_7.tmp_0', 'transpose_0.tmp_0']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-22 13:07:32,973 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2022-02-22 13:07:32,973 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "transpose_1.tmp_0",
+ "transpose_2.tmp_0",
+ "transpose_3.tmp_0",
+ "transpose_4.tmp_0",
+ "transpose_5.tmp_0",
+ "transpose_6.tmp_0",
+ "transpose_7.tmp_0",
+ "transpose_0.tmp_0"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2022-02-22 13:07:32,973 [pipeline_server.py:212] -------------------------------------------
+INFO 2022-02-22 13:07:32,973 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2022-02-22 13:07:33,002 [dag.py:493] [DAG] Succ init
+INFO 2022-02-22 13:07:33,003 [dag.py:651] ================= USED OP =================
+INFO 2022-02-22 13:07:33,003 [dag.py:654] ppyolo_mbv3
+INFO 2022-02-22 13:07:33,003 [dag.py:655] -------------------------------------------
+INFO 2022-02-22 13:07:33,032 [dag.py:784] [DAG] Succ build DAG
+INFO 2022-02-22 13:07:33,038 [dag.py:816] [DAG] start
+INFO 2022-02-22 13:07:33,039 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-22 13:07:33,046 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2022-02-22 13:07:33,051 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:07:33,052 [operator.py:1162] Init cuda env in process 0
+INFO 2022-02-22 13:07:33,052 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-22 13:07:34,092 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-22 13:07:35,740 [operator.py:1173] [ppyolo_mbv3|0] Succ init
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+INFO 2022-02-22 13:08:48,080 [operator.py:163] local_service_conf: {'client_type': 'local_predictor', 'device_type': 2, 'devices': '0', 'fetch_list': ['transpose_1.tmp_0', 'transpose_2.tmp_0', 'transpose_3.tmp_0', 'transpose_4.tmp_0', 'transpose_5.tmp_0', 'transpose_6.tmp_0', 'transpose_7.tmp_0', 'transpose_0.tmp_0'], 'model_config': 'serving_server/', 'workdir': '', 'thread_num': 2, 'mem_optim': True, 'ir_optim': False, 'precision': 'fp32', 'use_calib': False, 'use_mkldnn': False, 'mkldnn_cache_capacity': 0}
+INFO 2022-02-22 13:08:48,081 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:['transpose_1.tmp_0', 'transpose_2.tmp_0', 'transpose_3.tmp_0', 'transpose_4.tmp_0', 'transpose_5.tmp_0', 'transpose_6.tmp_0', 'transpose_7.tmp_0', 'transpose_0.tmp_0'], precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:08:48,081 [operator.py:267] ppyolo_mbv3
+ input_ops: @DAGExecutor,
+ server_endpoints: None
+ fetch_list: ['transpose_1.tmp_0', 'transpose_2.tmp_0', 'transpose_3.tmp_0', 'transpose_4.tmp_0', 'transpose_5.tmp_0', 'transpose_6.tmp_0', 'transpose_7.tmp_0', 'transpose_0.tmp_0']
+ client_config: serving_server/serving_server_conf.prototxt
+ concurrency: 1,
+ timeout(s): -1,
+ retry: 1,
+ batch_size: 1,
+ auto_batching_timeout(s): None
+INFO 2022-02-22 13:08:48,081 [pipeline_server.py:204] ============= PIPELINE SERVER =============
+INFO 2022-02-22 13:08:48,081 [pipeline_server.py:207]
+{
+ "dag":{
+ "is_thread_op":false,
+ "tracer":{
+ "interval_s":30
+ },
+ "retry":1,
+ "client_type":"brpc",
+ "use_profile":false,
+ "channel_size":0
+ },
+ "http_port":2009,
+ "op":{
+ "ppyolo_mbv3":{
+ "concurrency":1,
+ "local_service_conf":{
+ "client_type":"local_predictor",
+ "device_type":2,
+ "devices":"0",
+ "fetch_list":[
+ "transpose_1.tmp_0",
+ "transpose_2.tmp_0",
+ "transpose_3.tmp_0",
+ "transpose_4.tmp_0",
+ "transpose_5.tmp_0",
+ "transpose_6.tmp_0",
+ "transpose_7.tmp_0",
+ "transpose_0.tmp_0"
+ ],
+ "model_config":"serving_server/",
+ "workdir":"",
+ "thread_num":2,
+ "mem_optim":true,
+ "ir_optim":false,
+ "precision":"fp32",
+ "use_calib":false,
+ "use_mkldnn":false,
+ "mkldnn_cache_capacity":0
+ },
+ "timeout":-1,
+ "retry":1,
+ "batch_size":1,
+ "auto_batching_timeout":-1
+ }
+ },
+ "rpc_port":9999,
+ "worker_num":20,
+ "build_dag_each_worker":false
+}
+INFO 2022-02-22 13:08:48,081 [pipeline_server.py:212] -------------------------------------------
+INFO 2022-02-22 13:08:48,081 [operator.py:290] Op(ppyolo_mbv3) use local rpc service at port: []
+INFO 2022-02-22 13:08:48,110 [dag.py:493] [DAG] Succ init
+INFO 2022-02-22 13:08:48,111 [dag.py:651] ================= USED OP =================
+INFO 2022-02-22 13:08:48,111 [dag.py:654] ppyolo_mbv3
+INFO 2022-02-22 13:08:48,111 [dag.py:655] -------------------------------------------
+INFO 2022-02-22 13:08:48,144 [dag.py:784] [DAG] Succ build DAG
+INFO 2022-02-22 13:08:48,151 [dag.py:816] [DAG] start
+INFO 2022-02-22 13:08:48,152 [dag.py:181] [DAG] set in channel succ, name [@DAGExecutor]
+INFO 2022-02-22 13:08:48,160 [pipeline_server.py:47] [PipelineServicer] succ init
+INFO 2022-02-22 13:08:48,162 [local_service_handler.py:172] Models(serving_server/) will be launched by device gpu. use_gpu:True, use_trt:True, use_lite:False, use_xpu:False, device_type:2, devices:[0], mem_optim:True, ir_optim:False, use_profile:False, thread_num:2, client_type:local_predictor, fetch_names:None, precision:fp32, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None
+INFO 2022-02-22 13:08:48,163 [operator.py:1162] Init cuda env in process 0
+INFO 2022-02-22 13:08:48,163 [local_service_handler.py:208] GET_CLIENT : concurrency_idx=0, device_num=1
+INFO 2022-02-22 13:08:49,254 [local_predict.py:115] LocalPredictor load_model_config params: model_path:serving_server/, use_gpu:True, gpu_id:0, use_profile:False, thread_num:2, mem_optim:True, ir_optim:False, use_trt:True, use_lite:False, use_xpu:False, precision:fp32, use_calib:False, use_mkldnn:False, mkldnn_cache_capacity:0, mkldnn_op_list:None, mkldnn_bf16_op_list:None, use_feed_fetch_ops:False,
+INFO 2022-02-22 13:08:50,979 [operator.py:1173] [ppyolo_mbv3|0] Succ init
+INFO 2022-02-22 13:08:55,147 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2022-02-22 13:08:55,148 [operator.py:1421] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2022-02-22 13:08:55,149 [dag.py:368] (data_id=0 log_id=0) Succ Generate ID
+INFO 2022-02-22 13:08:57,113 [dag.py:404] (data_id=0 log_id=0) Succ predict
+INFO 2022-02-22 13:09:14,543 [pipeline_server.py:51] (log_id=0) inference request name:ppyolo_mbv3 self.name:ppyolo_mbv3
+INFO 2022-02-22 13:09:14,544 [operator.py:1421] RequestOp unpack one request. log_id:0, clientip: name:ppyolo_mbv3, method:prediction
+INFO 2022-02-22 13:09:14,544 [dag.py:368] (data_id=1 log_id=0) Succ Generate ID
+INFO 2022-02-22 13:09:14,608 [dag.py:404] (data_id=1 log_id=0) Succ predict
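
(Editor's note: the per-request entries just above, "inference request name:ppyolo_mbv3 ... method:prediction", are what the server logs for each HTTP call. A sketch of one such call, assuming the pipeline web service's usual URL scheme http://host:<http_port>/<op_name>/prediction and its key/value JSON body; the host and image path are placeholders:

    import base64
    import json

    import requests

    with open("test.jpg", "rb") as f:  # placeholder image path
        image = base64.b64encode(f.read()).decode("utf8")

    # http_port 2009 and the op name ppyolo_mbv3 come from the config dump.
    url = "http://127.0.0.1:2009/ppyolo_mbv3/prediction"
    payload = {"key": ["image"], "value": [image]}
    resp = requests.post(url=url, data=json.dumps(payload))
    print(resp.json())

)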
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/PipelineServingLogs/pipeline.log.wf b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/PipelineServingLogs/pipeline.log.wf
new file mode 100644
index 000000000..3649c96bd
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/PipelineServingLogs/pipeline.log.wf
@@ -0,0 +1,2034 @@
+WARNING 2021-12-29 02:45:16,604 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 02:45:16,604 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 02:45:16,604 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 02:45:16,604 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 02:45:16,605 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 02:45:16,606 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 02:45:16,606 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 02:45:16,606 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 02:45:16,606 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 02:45:16,606 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-29 02:45:45,873 [operator.py:969] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 965, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 76, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 429, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 02:45:45,877 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
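
(Editor's note: the failure above has a specific cause rather than a transient one. _get_bbox_result in image_reader.py indexes fetch_map[fetch_name + '.lod'], so it requires LoD (variable-length) outputs, while this export produced plain dense tensors under the save_infer_model/scale_*.tmp_1 names, leaving no '.lod' keys to read. The 2022-02-22 sessions in pipeline.log above show the eventual remedy: the fetch_list was re-pointed at the transpose_*.tmp_0 outputs. A small sketch that makes the mismatch explicit, using a hypothetical helper that is not code from this repo:

    def require_lod(fetch_map, fetch_name):
        """Fetch the LoD entry for fetch_name, replacing the bare KeyError in
        the traceback above with an actionable message (hypothetical helper)."""
        lod_key = fetch_name + ".lod"
        if lod_key not in fetch_map:
            raise KeyError(
                f"{lod_key!r} missing from fetch_map: the exported model returns "
                "dense tensors, so LoD-based postprocessing cannot be applied")
        return [fetch_map[lod_key]]

)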
+WARNING 2021-12-29 03:07:14,510 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 03:07:14,510 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:07:14,510 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 03:07:14,511 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 03:07:14,512 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 03:07:14,512 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 03:07:14,512 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 03:07:14,512 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 03:07:14,512 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-29 03:07:22,696 [operator.py:969] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 965, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 77, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 429, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 03:07:22,700 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 03:10:13,372 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 03:10:13,373 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 03:10:13,374 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-29 03:10:21,260 [operator.py:969] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 965, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 78, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 429, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 03:10:21,264 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 03:11:47,323 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 03:11:47,324 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 03:11:47,325 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-29 03:11:55,757 [operator.py:969] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 965, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 78, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 429, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 03:11:55,761 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 05:35:58,321 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 05:35:58,321 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 05:35:58,321 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 05:35:58,321 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 05:35:58,322 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 05:35:58,323 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 05:35:58,323 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 05:35:58,323 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 05:35:58,323 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+WARNING 2021-12-29 05:37:04,889 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 05:37:04,889 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 05:37:04,889 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 05:37:04,890 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 05:37:04,891 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 05:37:04,891 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 05:37:04,891 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 05:37:04,891 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 05:37:04,891 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-29 05:37:16,537 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 77, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 430, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 05:37:16,542 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+WARNING 2021-12-29 05:40:11,809 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-29 05:40:11,809 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 05:40:11,809 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-29 05:40:11,809 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-29 05:40:11,809 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-29 05:40:11,810 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-29 05:40:11,810 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-29 05:40:11,810 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-29 05:40:11,810 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-29 05:40:11,810 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-29 05:40:11,810 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-29 05:40:11,810 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-29 05:40:11,810 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-29 05:40:11,810 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-29 05:40:11,810 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-29 05:40:11,811 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-29 05:40:11,811 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-29 05:40:18,654 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 77, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 430, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 05:40:18,658 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+[... restart at 05:42:11: identical [CONF] default warnings, then the same postprocess KeyError traceback ...]
+[... restart at 06:08:54: identical [CONF] default warnings omitted ...]
+CRITICAL 2021-12-29 06:08:56,841 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: name 'yaml' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 33, in init_op
+ yml_conf = yaml.safe_load(f)
+NameError: name 'yaml' is not defined
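+# NOTE: init_op calls yaml.safe_load() but web_service.py never imports the module.
+# The fix, assuming PyYAML is installed in the serving environment:
+#
+#     import yaml  # add to the imports at the top of web_service.py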
+[... restart at 06:10:19: identical [CONF] warnings, then the same NameError ('yaml') traceback ...]
+[... restart at 06:12:08: identical [CONF] warnings omitted ...]
+CRITICAL 2021-12-29 06:12:11,443 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: __init__() got an unexpected keyword argument 'interp'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 40, in init_op
+ self.preprocess_ops.append(eval(op_type)(**new_op_info))
+TypeError: __init__() got an unexpected keyword argument 'interp'
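+# NOTE: init_op instantiates preprocess ops straight from the PaddleDetection
+# infer_cfg.yml via eval(op_type)(**new_op_info), and the config's Resize entry
+# carries an 'interp' key that this serving-side op class does not accept. A sketch
+# of one fix (key names are assumptions based on this traceback): drop the
+# unsupported key before construction, or port the deploy-side op classes that do
+# accept 'interp'.
+#
+#     new_op_info = op_info.copy()
+#     op_type = new_op_info.pop('type')
+#     new_op_info.pop('interp', None)  # not understood by this op's __init__
+#     self.preprocess_ops.append(eval(op_type)(**new_op_info))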
+[... restarts at 06:12:47, 06:15:10, 06:17:36, 06:18:17: identical [CONF] warnings, then the same TypeError ('interp') traceback each time, with the web_service.py line drifting from 41 to 45 as the file was edited ...]
+[... restart at 06:19:57: identical [CONF] warnings omitted ...]
+CRITICAL 2021-12-29 06:20:00,415 [operator.py:1176] [ppyolo_mbv3|0] failed to init op: name 'preprocess_ops' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1171, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1360, in _initialize
+ self.init_op()
+ File "web_service.py", line 45, in init_op
+ preprocess_ops.append(eval(op_type)(**new_op_info))
+NameError: name 'preprocess_ops' is not defined
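+# NOTE: the append now targets a bare 'preprocess_ops' name that was never bound in
+# init_op. Either initialize the local list first or keep the attribute form used
+# in the earlier revisions:
+#
+#     preprocess_ops = []  # before the loop
+#     preprocess_ops.append(eval(op_type)(**new_op_info))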
+[... restarts at 06:21:28, 06:25:12, 06:25:29, 06:31:43: identical [CONF] warnings, then the same NameError ('preprocess_ops') traceback each time, with the web_service.py line drifting from 46 to 49 ...]
+[... restarts at 06:32:31 and 06:33:05: identical [CONF] warnings only, no request handled ...]
+[... restart at 06:40:19: identical [CONF] warnings omitted ...]
+ERROR 2021-12-29 06:40:25,105 [operator.py:695] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: local variable 'im_info' referenced before assignment
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 678, in _run_preprocess
+ parsed_data, data_id, logid_dict.get(data_id))
+ File "web_service.py", line 54, in preprocess
+ im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
+UnboundLocalError: local variable 'im_info' referenced before assignment
+ERROR 2021-12-29 06:40:25,111 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: local variable 'im_info' referenced before assignment
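+# NOTE: preprocess assigns im_info['im_shape'] before im_info exists. A sketch of
+# the fix, modeled on PaddleDetection's deploy preprocess (the 'scale_factor'
+# default is an assumption):
+#
+#     im_info = {
+#         'scale_factor': np.array([1.0, 1.0], dtype=np.float32),
+#     }
+#     im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)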
+[... restarts at 06:42:03, 06:42:44, 06:44:34: identical [CONF] warnings, then the same UnboundLocalError ('im_info') traceback each time ...]
+[... restart at 06:46:19: identical [CONF] warnings omitted ...]
+ERROR 2021-12-29 06:46:25,581 [operator.py:695] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: operands could not be broadcast together with shapes (3,640,640) (1,1,3) (3,640,640)
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 678, in _run_preprocess
+ parsed_data, data_id, logid_dict.get(data_id))
+ File "web_service.py", line 71, in preprocess
+ im = self.img_preprocess(im)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 492, in __call__
+ img = t(img)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 642, in __call__
+ return F.normalize(img, self.mean, self.std, self.channel_first)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/functional.py", line 33, in normalize
+ img -= img_mean
+ValueError: operands could not be broadcast together with shapes (3,640,640) (1,1,3) (3,640,640)
+ERROR 2021-12-29 06:46:25,587 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: operands could not be broadcast together with shapes (3,640,640) (1,1,3) (3,640,640)
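+# NOTE: Normalize received a CHW tensor (3, 640, 640) while its mean/std are laid
+# out HWC as (1, 1, 3), so the Transpose step runs before normalization (or
+# channel_first is mis-set). A sketch of a consistent op order using the
+# paddle_serving_app.reader transforms seen in this log (mean/std values are the
+# usual ImageNet ones, an assumption; so is the Normalize channel_first argument):
+#
+#     self.img_preprocess = Sequential([
+#         BGR2RGB(),
+#         Div(255.0),
+#         Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),  # HWC here
+#         Resize((640, 640)),
+#         Transpose((2, 0, 1)),  # convert to CHW only after Normalize
+#     ])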
+[... restart at 06:51:01: identical [CONF] warnings omitted ...]
+ERROR 2021-12-29 06:51:07,885 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 89, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 430, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 06:51:07,889 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
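+# NOTE: with preprocessing fixed, the run again dies on the original postprocess
+# KeyError ('save_infer_model/scale_0.tmp_1.lod'); the lod workaround sketched
+# after the 05:40:18 traceback above still applies.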
+[... restarts at 06:51:56, 06:52:13, 06:54:00, 07:13:12: identical [CONF] warnings, then the same postprocess KeyError traceback on each request, with the web_service.py line drifting from 86 to 87 ...]
+ERROR 2021-12-29 07:16:32,263 [operator.py:695] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: name 'im_shape' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 678, in _run_preprocess
+ parsed_data, data_id, logid_dict.get(data_id))
+ File "web_service.py", line 68, in preprocess
+ "im_shape": im_info[im_shape],#np.array(list(im.shape[1:])).reshape(-1)[np.newaxis,:],
+NameError: name 'im_shape' is not defined
+ERROR 2021-12-29 07:16:32,267 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to preprocess: name 'im_shape' is not defined
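The preprocess failure above is a plain Python bug in web_service.py: `im_info[im_shape]` uses the bare name `im_shape` as a dict key, so the NameError fires before the feed dict is even built. A minimal sketch of the corrected construction, assuming `im_info` is the dict produced by the preprocessing transforms (the helper name `build_feed` is hypothetical; the fallback expression is the one commented out in the traceback):

```python
import numpy as np

def build_feed(im, im_info):
    # Quote the key: `im_info[im_shape]` fails because the bare name
    # `im_shape` is undefined; `im_info["im_shape"]` is what was meant.
    im_shape = im_info.get(
        "im_shape",
        # Fallback from the commented-out line in the log: derive the
        # (h, w) shape from the CHW image tensor itself.
        np.array(list(im.shape[1:])).reshape(-1)[np.newaxis, :],
    )
    return {"image": im, "im_shape": im_shape}
```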
+ERROR 2021-12-29 07:17:39,797 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 87, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 07:17:39,802 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 07:19:12,763 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 87, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 07:19:12,767 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 07:42:25,683 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 87, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 07:42:25,687 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_0.tmp_1.lod'
+ERROR 2021-12-29 07:50:05,801 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_4.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 87, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_4.tmp_1.lod'
+ERROR 2021-12-29 07:50:05,806 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_4.tmp_1.lod'
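Note that the missing key has already shifted from `save_infer_model/scale_0.tmp_1.lod` to `save_infer_model/scale_4.tmp_1.lod`, and it shifts again further down (`scale_3`, `scale_7`): the bbox output's variable name depends on the particular export, so hard-coding one name breaks on the next exported model. A hedged sketch of discovering the name at runtime instead, assuming the fetch map holds a single bbox tensor:

```python
# Sketch: take the bbox output name from whatever the server actually
# returned instead of hard-coding save_infer_model/scale_N.tmp_1.
fetch_name = next(k for k in fetch_dict if not k.endswith(".lod"))
bbox_tensor = fetch_dict[fetch_name]
```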
+ERROR 2021-12-30 06:53:28,471 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_3.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 81, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_3.tmp_1.lod'
+ERROR 2021-12-30 06:53:28,476 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_3.tmp_1.lod'
+ERROR 2021-12-30 07:57:15,249 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 81, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 07:57:15,253 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:12:14,700 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 82, in postprocess
+ np_score_list.append(fetch_dict[out_idx])
+KeyError: 0
+ERROR 2021-12-30 08:12:14,707 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+ERROR 2021-12-30 08:13:52,217 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 83, in postprocess
+ np_score_list.append(fetch_dict[out_idx])
+KeyError: 0
+ERROR 2021-12-30 08:13:52,220 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+ERROR 2021-12-30 08:17:22,212 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 83, in postprocess
+ np_score_list.append(fetch_dict[i])
+KeyError: 0
+ERROR 2021-12-30 08:17:22,217 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+ERROR 2021-12-30 08:17:39,176 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 83, in postprocess
+ np_score_list.append(fetch_dict[i])
+KeyError: 0
+ERROR 2021-12-30 08:17:39,180 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+ERROR 2021-12-30 08:20:20,892 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 84, in postprocess
+ np_score_list.append(fetch_dict[i])
+KeyError: 0
+ERROR 2021-12-30 08:20:20,896 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 0
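The `KeyError: 0` attempts show a second indexing mistake: `fetch_dict` maps output variable names (strings like `'save_infer_model/scale_0.tmp_1'`) to arrays, so integer indices such as `fetch_dict[out_idx]` or `fetch_dict[i]` can never hit. A sketch of gathering the outputs by name instead (iteration order and the `.lod` skip are assumptions for illustration, not taken from the log):

```python
# fetch_dict keys are output variable names, never integers, so build the
# score list by iterating names rather than positions.
np_score_list = []
for name in sorted(fetch_dict):          # deterministic order across calls
    if name.endswith(".lod"):            # skip LoD side-entries, if any
        continue
    np_score_list.append(fetch_dict[name])
```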
+ERROR 2021-12-30 08:32:01,331 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 91, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:32:01,335 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:34:28,756 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 96, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:34:28,761 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:43:46,213 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 111, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:43:46,218 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:49:39,879 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 113, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:49:39,884 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:52:50,006 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 110, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 08:52:50,011 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 09:03:22,048 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 102, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 09:03:22,053 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 09:07:53,007 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 102, in postprocess
+ res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict, visualize=False))}
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 431, in __call__
+ self.clsid2catid)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 346, in _get_bbox_result
+ lod = [fetch_map[fetch_name + '.lod']]
+KeyError: 'save_infer_model/scale_7.tmp_1.lod'
+ERROR 2021-12-30 09:07:53,011 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: 'save_infer_model/scale_7.tmp_1.lod'
+WARNING 2021-12-30 09:08:06,502 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:08:06,503 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:08:06,503 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:08:06,503 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:08:06,504 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:08:06,504 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:08:06,504 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:08:06,504 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:08:06,505 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:08:06,505 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:08:06,505 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:08:06,505 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:08:06,505 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:08:06,506 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:08:06,506 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:08:06,506 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:08:06,506 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:08:11,524 [operator.py:1000] (log_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: output of postprocess funticon must be dict type, but get
+ERROR 2021-12-30 09:08:11,527 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (log_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: output of postprocess funticon must be dict type, but get
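
This failure is the framework's own type check: whatever `postprocess` returns is validated before packing, and a bare string, ndarray, or `None` (an implicit return on some code path) fails it. A tiny sketch of the rule, with an illustrative key name:

```python
def to_response(bbox_result):
    # The pipeline rejects non-dict outputs from postprocess(); wrap every
    # result, even a single value, and keep values string-serializable.
    return {"bbox_result": str(bbox_result)}
```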
+WARNING 2021-12-30 09:19:39,408 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:19:39,409 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:19:39,409 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:19:39,409 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:19:39,409 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:19:39,410 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:19:39,410 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:19:39,410 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:19:39,410 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:19:39,410 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:19:39,411 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:19:39,411 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:19:39,411 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:19:39,411 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:19:39,411 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:19:39,412 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:19:39,412 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:19:46,754 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: name 'res_dict' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 101, in postprocess
+ res_dict[b] = {}
+NameError: name 'res_dict' is not defined
+ERROR 2021-12-30 09:19:46,757 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: name 'res_dict' is not defined
+WARNING 2021-12-30 09:20:11,181 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:20:11,181 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:20:11,181 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:20:11,182 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:20:11,182 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:20:11,182 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:20:11,182 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:20:11,182 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:20:11,182 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:20:11,182 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:20:11,182 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:20:11,183 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:20:11,183 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:20:11,183 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:20:11,183 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:20:11,183 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:20:11,183 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:20:15,417 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: list assignment index out of range
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 102, in postprocess
+ res_dict[b] = {}
+IndexError: list assignment index out of range
+ERROR 2021-12-30 09:20:15,421 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: list assignment index out of range
+WARNING 2021-12-30 09:21:19,542 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:21:19,543 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:21:19,543 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:21:19,543 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:21:19,543 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:21:19,544 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:21:19,544 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:21:19,544 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:21:19,544 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:21:19,545 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:21:19,545 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:21:19,545 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:21:19,545 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:21:19,545 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:21:19,546 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:21:19,546 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:21:19,546 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:21:24,326 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: list assignment index out of range
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 103, in postprocess
+ res_dict[b] = {}
+IndexError: list assignment index out of range
+ERROR 2021-12-30 09:21:24,330 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: list assignment index out of range
+WARNING 2021-12-30 09:23:31,824 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:23:31,824 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:23:31,824 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:23:31,824 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:23:31,825 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:23:31,825 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:23:31,825 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:23:31,825 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:23:31,826 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:23:31,826 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:23:31,826 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:23:31,826 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:23:31,826 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:23:31,827 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:23:31,827 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:23:31,827 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:23:31,827 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:23:37,722 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: name 'a' is not defined
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 101, in postprocess
+ for b in range(a.ndim):
+NameError: name 'a' is not defined
+ERROR 2021-12-30 09:23:37,726 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: name 'a' is not defined
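
The NameError/IndexError/NameError run above is one loop being debugged in place: `res_dict` is referenced before it is created, then created as a list (so `res_dict[b] = {}` raises `IndexError`), then iterated over an undefined `a`. A minimal sketch of what that loop presumably wants, with `np_boxes` standing in for the decoded detection array (the name is illustrative):

```python
import numpy as np

np_boxes = np.zeros((3, 6))         # illustrative stand-in for decoder output
res_dict = {}                       # create the container first (fixes the NameError)
for b in range(np_boxes.shape[0]):  # iterate over a defined array, not 'a'
    res_dict[b] = {}                # dict keys can't go "out of range" like list indices
```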
+WARNING 2021-12-30 09:24:02,842 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:24:02,842 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:24:02,843 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:24:02,843 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:24:02,843 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:24:02,843 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:24:02,844 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:24:02,844 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:24:02,844 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:24:02,844 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:24:02,844 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:24:02,845 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:24:02,845 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:24:02,845 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:24:02,845 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:24:02,845 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:24:02,846 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:24:07,046 [operator.py:1000] (log_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: output of postprocess funticon must be dict type, but get
+ERROR 2021-12-30 09:24:07,049 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (log_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: output of postprocess funticon must be dict type, but get
+WARNING 2021-12-30 09:25:54,030 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:25:54,031 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:25:54,031 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:25:54,031 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:25:54,031 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:25:54,032 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:25:54,032 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:25:54,032 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:25:54,032 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:25:54,032 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:25:54,033 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:25:54,033 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:25:54,033 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:25:54,033 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:25:54,033 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:25:54,034 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:25:54,034 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:25:58,334 [operator.py:1487] (logid=0) Failed to pack RPC response package:
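
The message is cut off after the colon, but "Failed to pack RPC response package" at this point in the run usually means the returned dict held a value the response protobuf could not serialize. A hedged normalizer that stringifies anything exotic before returning, matching the `str(...)` wrapping used in the snippets above:

```python
def normalize(res):
    # Keep response values protobuf-friendly (str/bytes); str() everything else.
    return {k: v if isinstance(v, (str, bytes)) else str(v)
            for k, v in res.items()}
```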
+WARNING 2021-12-30 09:39:07,135 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:39:07,136 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:39:07,136 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:39:07,136 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:39:07,137 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:39:07,137 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:39:07,137 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:39:07,137 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:39:07,137 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:39:07,138 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:39:07,138 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:39:07,138 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:39:07,138 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:39:07,138 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:39:07,139 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:39:07,139 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:39:07,139 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:40:00,249 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:40:00,253 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:40:48,711 [operator.py:973] (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:40:48,714 [dag.py:409] (data_id=2 log_id=0) Failed to predict: (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:40:53,720 [operator.py:973] (data_id=3 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:40:53,723 [dag.py:409] (data_id=3 log_id=0) Failed to predict: (data_id=3 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 09:42:01,778 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:42:01,779 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:42:01,779 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:42:01,779 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:42:01,779 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:42:01,780 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:42:01,780 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:42:01,780 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:42:01,780 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:42:01,781 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:42:01,781 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:42:01,781 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:42:01,781 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:42:01,781 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:42:01,782 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:42:01,782 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:42:01,782 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:42:06,612 [operator.py:973] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 09:42:06,616 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 09:42:19,953 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 09:42:19,957 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
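
The `__call__() takes 3 positional arguments but 4 were given` and `postprocess() missing 1 required positional argument: 'log_id'` failures are two sides of the same mismatch: the installed `paddle_serving_server` invokes the hook with a fixed positional argument list, and the signature in `web_service.py` (or a callable object dropped into the `postprocess` slot) does not line up with it. The call site in `operator.py` around line 969 is the authority; a defensive signature that tolerates both the older 3-argument and newer 4-argument conventions is one way out (a sketch, not the framework's documented API):

```python
def postprocess(self, input_dicts, fetch_dict, *args):
    # Older pipeline builds pass (input_dicts, fetch_dict, log_id); newer
    # ones pass (input_dicts, fetch_dict, data_id, log_id). Absorb both.
    log_id = args[-1] if args else 0
    return {"bbox_result": str(fetch_dict)}
```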
+WARNING 2021-12-30 09:42:23,963 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:42:23,963 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:42:23,963 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:42:23,963 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:42:23,964 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:42:23,964 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:42:23,964 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:42:23,964 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:42:23,965 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:42:23,965 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:42:23,965 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:42:23,965 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:42:23,966 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:42:23,966 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:42:23,966 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:42:23,966 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:42:23,967 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:43:10,985 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:43:10,988 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:43:15,944 [operator.py:973] (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:43:15,950 [dag.py:409] (data_id=2 log_id=0) Failed to predict: (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 09:44:54,821 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:44:54,822 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:44:54,822 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:44:54,822 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:44:54,822 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:44:54,823 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:44:54,823 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:44:54,823 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:44:54,823 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:44:54,823 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:44:54,824 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:44:54,824 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:44:54,824 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:44:54,824 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:44:54,825 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:44:54,825 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:44:54,825 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:45:05,468 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:45:05,472 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 09:46:17,679 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 09:46:17,680 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:46:17,680 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 09:46:17,680 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 09:46:17,680 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 09:46:17,680 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 09:46:17,681 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 09:46:17,681 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 09:46:17,681 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 09:46:17,681 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 09:46:17,682 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 09:46:17,682 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 09:46:17,682 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 09:46:17,682 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 09:46:17,682 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 09:46:17,683 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 09:46:17,683 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 09:46:25,584 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 09:46:25,588 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 11:05:40,432 [pipeline_server.py:55] (log_id=0) name dismatch error. request.name:recognition,server.name=ppyolo_mbv3
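
The `name dismatch error` (the spelling is the server's own) is a routing problem, not a code one: the request was addressed to a service named `recognition`, presumably left over from another example, while this server registered its op as `ppyolo_mbv3`. A hedged HTTP client sketch with the name corrected (port and file name are illustrative):

```python
import base64
import requests

# The URL path must use the server's op name, not one copied from another demo.
url = "http://127.0.0.1:9999/ppyolo_mbv3/prediction"
with open("test.jpg", "rb") as f:
    image = base64.b64encode(f.read()).decode("utf8")
resp = requests.post(url, json={"key": ["image"], "value": [image]})
print(resp.json())
```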
+ERROR 2021-12-30 11:05:49,271 [operator.py:973] (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 11:05:49,274 [dag.py:409] (data_id=2 log_id=0) Failed to predict: (data_id=2 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 11:05:54,559 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 11:05:54,560 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:05:54,560 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 11:05:54,560 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 11:05:54,560 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 11:05:54,560 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 11:05:54,561 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:05:54,561 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 11:05:54,561 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 11:05:54,561 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 11:05:54,562 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 11:05:54,562 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 11:05:54,562 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 11:05:54,562 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 11:05:54,563 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 11:05:54,563 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 11:05:54,563 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 11:15:50,225 [operator.py:973] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 969, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 11:15:50,229 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 11:24:24,428 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 11:24:24,428 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:24:24,428 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 11:24:24,429 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 11:24:24,429 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 11:24:24,429 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 11:24:24,429 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:24:24,429 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 11:24:24,430 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 11:24:24,430 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 11:24:24,430 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 11:24:24,430 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 11:24:24,430 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 11:24:24,431 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 11:24:24,431 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 11:24:24,431 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 11:24:24,431 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 11:24:40,005 [operator.py:976] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 972, in _run_postprocess
+ logid_dict.get(data_id))
+TypeError: __call__() takes 3 positional arguments but 4 were given
+ERROR 2021-12-30 11:24:40,009 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: __call__() takes 3 positional arguments but 4 were given
+WARNING 2021-12-30 11:37:49,610 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 11:37:49,611 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:37:49,611 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 11:37:49,611 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 11:37:49,611 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 11:37:49,612 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 11:37:49,612 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:37:49,612 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 11:37:49,612 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 11:37:49,613 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 11:37:49,613 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 11:37:49,613 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 11:37:49,613 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 11:37:49,613 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 11:37:49,614 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 11:37:49,614 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 11:37:49,614 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 11:37:54,033 [operator.py:976] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 972, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 92, in postprocess
+ np_boxes, np_boxes_num = self.postprocess(np_score_list, np_boxes_list)
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 11:37:54,039 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 11:37:55,960 [operator.py:976] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 972, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 92, in postprocess
+ np_boxes, np_boxes_num = self.postprocess(np_score_list, np_boxes_list)
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 11:37:55,963 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+WARNING 2021-12-30 11:38:09,257 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2021-12-30 11:38:09,258 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:38:09,258 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2021-12-30 11:38:09,258 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2021-12-30 11:38:09,258 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2021-12-30 11:38:09,259 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2021-12-30 11:38:09,259 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2021-12-30 11:38:09,259 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2021-12-30 11:38:09,259 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2021-12-30 11:38:09,260 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2021-12-30 11:38:09,260 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2021-12-30 11:38:09,260 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2021-12-30 11:38:09,260 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2021-12-30 11:38:09,260 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2021-12-30 11:38:09,261 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2021-12-30 11:38:09,261 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2021-12-30 11:38:09,261 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+ERROR 2021-12-30 11:38:13,407 [operator.py:976] (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 972, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 92, in postprocess
+ np_boxes, np_boxes_num = self.postprocess(np_score_list, np_boxes_list)
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 11:38:13,411 [dag.py:409] (data_id=0 log_id=0) Failed to predict: (data_id=0 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 11:40:01,379 [operator.py:976] (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 972, in _run_postprocess
+ logid_dict.get(data_id))
+ File "web_service.py", line 92, in postprocess
+ self.post_process = PicoDetPostProcess(
+TypeError: postprocess() missing 1 required positional argument: 'log_id'
+ERROR 2021-12-30 11:40:01,383 [dag.py:409] (data_id=1 log_id=0) Failed to predict: (data_id=1 log_id=0) [ppyolo_mbv3|0] Failed to postprocess: postprocess() missing 1 required positional argument: 'log_id'
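
The last three tracebacks narrow the `log_id` error down to `web_service.py` line 92: `self.postprocess(np_score_list, np_boxes_list)` re-enters the pipeline Op's own `postprocess` hook (which wants its `log_id`) instead of the PicoDet decoder, and the final traceback shows a `PicoDetPostProcess` being constructed on that same line. Keeping the decoder under a name that cannot shadow the hook avoids the recursion; a self-contained sketch of the shape (the decoder lambda and the fetch keys are placeholders):

```python
class DetectorOp:
    def __init__(self):
        # Decoder helper: name it so it can never collide with the framework's
        # postprocess() hook below; calling self.postprocess here would
        # re-enter the hook and drop its log_id argument.
        self.picodet_postprocess = lambda scores, boxes: (boxes, len(boxes))

    def postprocess(self, input_dicts, fetch_dict, log_id):
        np_score_list, np_boxes_list = fetch_dict["scores"], fetch_dict["boxes"]
        np_boxes, np_boxes_num = self.picodet_postprocess(np_score_list, np_boxes_list)
        return {"bbox_result": str(np_boxes)}
```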
+WARNING 2022-02-14 09:24:33,065 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-14 09:24:33,072 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-14 09:24:33,072 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-14 09:24:33,072 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-14 09:24:33,072 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-14 09:24:33,072 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-14 09:24:33,073 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-14 09:24:33,074 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-14 09:24:33,074 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-14 09:24:33,074 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+CRITICAL 2022-02-14 09:24:35,602 [operator.py:1179] [ppyolo_mbv3|0] failed to init op: [Errno 2] No such file or directory: 'label_list.txt'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1174, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1363, in _initialize
+ self.init_op()
+ File "web_service.py", line 30, in init_op
+ self.img_postprocess = RCNNPostprocess("label_list.txt", "output")
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 288, in __init__
+ with open(label_file) as fin:
+FileNotFoundError: [Errno 2] No such file or directory: 'label_list.txt'
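
`RCNNPostprocess` opens the label file exactly as given, so the relative `'label_list.txt'` resolves against whatever directory the server was launched from. Anchoring the path to the script itself is the usual fix; a sketch of the `init_op` line, with `LABEL_FILE` passed to `RCNNPostprocess` in place of the bare name:

```python
import os

# Resolve label_list.txt next to web_service.py rather than the launch CWD;
# RCNNPostprocess opens the path as-is, per the traceback above.
LABEL_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          "label_list.txt")
```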
+WARNING 2022-02-14 09:24:42,704 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-14 09:24:42,705 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-14 09:24:42,705 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-14 09:24:42,705 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-14 09:24:42,705 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-14 09:24:42,706 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-14 09:24:42,706 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-14 09:24:42,706 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-14 09:24:42,706 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-14 09:24:42,707 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-14 09:24:42,707 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-14 09:24:42,707 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-14 09:24:42,707 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-14 09:24:42,708 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-14 09:24:42,708 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-14 09:24:42,708 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-14 09:24:42,708 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+CRITICAL 2022-02-14 09:24:45,275 [operator.py:1179] [ppyolo_mbv3|0] failed to init op: [Errno 2] No such file or directory: 'label_list.txt'
+Traceback (most recent call last):
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1174, in _run
+ profiler = self._initialize(is_thread_op, concurrency_idx)
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_server/pipeline/operator.py", line 1363, in _initialize
+ self.init_op()
+ File "web_service.py", line 30, in init_op
+ self.img_postprocess = RCNNPostprocess("label_list.txt", "output")
+ File "/usr/local/python3.7.0/lib/python3.7/site-packages/paddle_serving_app/reader/image_reader.py", line 288, in __init__
+ with open(label_file) as fin:
+FileNotFoundError: [Errno 2] No such file or directory: 'label_list.txt'
+WARNING 2022-02-16 16:56:51,836 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-16 16:56:51,836 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 16:56:51,836 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-16 16:56:51,836 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-16 16:56:51,836 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-16 16:56:51,837 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-16 17:05:23,144 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-16 17:05:23,145 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] use_profile not set, use default: False
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] channel_recv_frist_arrive not set, use default: False
+WARNING 2022-02-16 17:09:22,047 [pipeline_server.py:509] [CONF] timeout not set, use default: -1
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] retry not set, use default: 1
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] workdir not set, use default:
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] precision not set, use default: fp32
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] use_calib not set, use default: False
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-16 17:09:22,048 [pipeline_server.py:509] [CONF] mkldnn_cache_capacity not set, use default: 0
+WARNING 2022-02-22 13:07:32,971 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-22 13:07:32,971 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:07:32,971 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-22 13:07:32,971 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-22 13:07:32,972 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] build_dag_each_worker not set, use default: False
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] client_type not set, use default: brpc
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] use_profile not set, use default: False
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] channel_size not set, use default: 0
+WARNING 2022-02-22 13:08:48,079 [pipeline_server.py:496] [CONF] timeout not set, use default: -1
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] retry not set, use default: 1
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] batch_size not set, use default: 1
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] auto_batching_timeout not set, use default: -1
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] workdir not set, use default:
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] thread_num not set, use default: 2
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] mem_optim not set, use default: True
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] ir_optim not set, use default: False
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] precision not set, use default: fp32
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] use_calib not set, use default: False
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] use_mkldnn not set, use default: False
+WARNING 2022-02-22 13:08:48,080 [pipeline_server.py:496] [CONF] mkldnn_cache_capacity not set, use default: 0
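The `[CONF]` warnings above enumerate every key that `pipeline_server.py` falls back to a default for when it is absent from `config.yml`. Collected in one place, as a minimal reference sketch (names and values copied verbatim from the warnings; the dict itself is illustrative, not an object the serving framework consumes):

```python
# Defaults reported by pipeline_server.py for keys missing from config.yml.
# Values are copied from the [CONF] warnings above; reference sketch only.
PIPELINE_SERVER_DEFAULTS = {
    "build_dag_each_worker": False,
    "retry": 1,                    # warned twice per startup in the log above
    "client_type": "brpc",
    "use_profile": False,
    "channel_size": 0,
    "timeout": -1,
    "batch_size": 1,
    "auto_batching_timeout": -1,
    "workdir": "",
    "thread_num": 2,
    "mem_optim": True,
    "ir_optim": False,
    "precision": "fp32",
    "use_calib": False,
    "use_mkldnn": False,
    "mkldnn_cache_capacity": 0,
}
```

The 2022-02-16 runs (logging from `pipeline_server.py:509`) additionally report `channel_recv_frist_arrive` (spelling as logged, default `False`), a key the runs logging from `pipeline_server.py:496` do not mention.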
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/PipelineServingLogs/pipeline.tracer b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/PipelineServingLogs/pipeline.tracer
new file mode 100644
index 000000000..5dfdefae0
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/PipelineServingLogs/pipeline.tracer
@@ -0,0 +1,6974 @@
+2021-12-29 02:45:16,713 ==================== TRACER ======================
+2021-12-29 02:45:16,715 Channel (server worker num[20]):
+2021-12-29 02:45:16,717 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:45:16,718 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
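Each tracer report opens with the channel topology of the serving DAG; here a single op, `ppyolo_mbv3`, is wired to the `@DAGExecutor` by two channels. As plain data, purely to help read the log (the `channels` name is hypothetical):

```python
# Channel layout from the tracer lines above: requests enter via chl0
# (@DAGExecutor -> ppyolo_mbv3) and results return via chl1.
channels = {
    "chl0": {"in": ["@DAGExecutor"], "out": ["ppyolo_mbv3"]},
    "chl1": {"in": ["ppyolo_mbv3"], "out": ["@DAGExecutor"]},
}
```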
+2021-12-29 02:45:46,748 ==================== TRACER ======================
+2021-12-29 02:45:46,749 DAGExecutor:
+2021-12-29 02:45:46,749 Query count[1]
+2021-12-29 02:45:46,749 QPS[0.03333333333333333 q/s]
+2021-12-29 02:45:46,750 Succ[0.0]
+2021-12-29 02:45:46,750 Error req[0]
+2021-12-29 02:45:46,750 Latency:
+2021-12-29 02:45:46,750 ave[1691.297 ms]
+2021-12-29 02:45:46,750 .50[1691.297 ms]
+2021-12-29 02:45:46,751 .60[1691.297 ms]
+2021-12-29 02:45:46,751 .70[1691.297 ms]
+2021-12-29 02:45:46,751 .80[1691.297 ms]
+2021-12-29 02:45:46,751 .90[1691.297 ms]
+2021-12-29 02:45:46,751 .95[1691.297 ms]
+2021-12-29 02:45:46,752 .99[1691.297 ms]
+2021-12-29 02:45:46,752 Channel (server worker num[20]):
+2021-12-29 02:45:46,753 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:45:46,753 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
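When at least one query arrives during a tracer interval, the report also includes DAGExecutor statistics. The interval is 30 seconds (compare the timestamps, 02:45:16 to 02:45:46), and the QPS figure is just the interval's query count divided by its length, as a quick check shows:

```python
# QPS as reported above: one query over a 30 s tracer window.
query_count = 1
interval_seconds = 30
print(query_count / interval_seconds)  # 0.03333333333333333 q/s
```

With a single query in the window, every latency percentile collapses to that one query's latency (1691.297 ms here), which is why the .50 through .99 rows all repeat the same value.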
+2021-12-29 02:46:16,784 ==================== TRACER ======================
+2021-12-29 02:46:16,785 Channel (server worker num[20]):
+2021-12-29 02:46:16,786 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:46:16,786 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:46:46,817 ==================== TRACER ======================
+2021-12-29 02:46:46,818 Channel (server worker num[20]):
+2021-12-29 02:46:46,818 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:46:46,819 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:47:16,849 ==================== TRACER ======================
+2021-12-29 02:47:16,850 Channel (server worker num[20]):
+2021-12-29 02:47:16,851 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:47:16,852 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:47:46,882 ==================== TRACER ======================
+2021-12-29 02:47:46,883 Channel (server worker num[20]):
+2021-12-29 02:47:46,884 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:47:46,884 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:48:16,888 ==================== TRACER ======================
+2021-12-29 02:48:16,889 Channel (server worker num[20]):
+2021-12-29 02:48:16,890 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:48:16,891 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:48:46,916 ==================== TRACER ======================
+2021-12-29 02:48:46,917 Channel (server worker num[20]):
+2021-12-29 02:48:46,918 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:48:46,919 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:49:16,949 ==================== TRACER ======================
+2021-12-29 02:49:16,950 Channel (server worker num[20]):
+2021-12-29 02:49:16,951 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:49:16,951 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:49:46,982 ==================== TRACER ======================
+2021-12-29 02:49:46,982 Channel (server worker num[20]):
+2021-12-29 02:49:46,983 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:49:46,984 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:50:17,014 ==================== TRACER ======================
+2021-12-29 02:50:17,015 Channel (server worker num[20]):
+2021-12-29 02:50:17,016 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:50:17,017 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:50:47,047 ==================== TRACER ======================
+2021-12-29 02:50:47,048 Channel (server worker num[20]):
+2021-12-29 02:50:47,049 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:50:47,049 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:51:17,080 ==================== TRACER ======================
+2021-12-29 02:51:17,080 Channel (server worker num[20]):
+2021-12-29 02:51:17,081 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:51:17,082 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:51:47,112 ==================== TRACER ======================
+2021-12-29 02:51:47,113 Channel (server worker num[20]):
+2021-12-29 02:51:47,114 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:51:47,114 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:52:17,145 ==================== TRACER ======================
+2021-12-29 02:52:17,146 Channel (server worker num[20]):
+2021-12-29 02:52:17,146 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:52:17,147 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:52:47,177 ==================== TRACER ======================
+2021-12-29 02:52:47,178 Channel (server worker num[20]):
+2021-12-29 02:52:47,179 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:52:47,179 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:53:17,210 ==================== TRACER ======================
+2021-12-29 02:53:17,211 Channel (server worker num[20]):
+2021-12-29 02:53:17,211 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:53:17,212 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:53:47,243 ==================== TRACER ======================
+2021-12-29 02:53:47,243 Channel (server worker num[20]):
+2021-12-29 02:53:47,244 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:53:47,245 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:54:17,275 ==================== TRACER ======================
+2021-12-29 02:54:17,276 Channel (server worker num[20]):
+2021-12-29 02:54:17,277 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:54:17,278 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:54:47,308 ==================== TRACER ======================
+2021-12-29 02:54:47,309 Channel (server worker num[20]):
+2021-12-29 02:54:47,310 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:54:47,310 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:55:17,341 ==================== TRACER ======================
+2021-12-29 02:55:17,342 Channel (server worker num[20]):
+2021-12-29 02:55:17,342 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:55:17,343 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:55:47,373 ==================== TRACER ======================
+2021-12-29 02:55:47,374 Channel (server worker num[20]):
+2021-12-29 02:55:47,375 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:55:47,376 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:56:17,406 ==================== TRACER ======================
+2021-12-29 02:56:17,407 Channel (server worker num[20]):
+2021-12-29 02:56:17,408 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:56:17,409 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:56:47,439 ==================== TRACER ======================
+2021-12-29 02:56:47,440 Channel (server worker num[20]):
+2021-12-29 02:56:47,441 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:56:47,441 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:57:17,453 ==================== TRACER ======================
+2021-12-29 02:57:17,454 Channel (server worker num[20]):
+2021-12-29 02:57:17,455 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:57:17,456 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:57:47,486 ==================== TRACER ======================
+2021-12-29 02:57:47,487 Channel (server worker num[20]):
+2021-12-29 02:57:47,488 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:57:47,489 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:58:17,519 ==================== TRACER ======================
+2021-12-29 02:58:17,520 Channel (server worker num[20]):
+2021-12-29 02:58:17,521 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:58:17,521 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:58:47,552 ==================== TRACER ======================
+2021-12-29 02:58:47,553 Channel (server worker num[20]):
+2021-12-29 02:58:47,553 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:58:47,554 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:59:17,584 ==================== TRACER ======================
+2021-12-29 02:59:17,585 Channel (server worker num[20]):
+2021-12-29 02:59:17,586 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:59:17,587 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 02:59:47,617 ==================== TRACER ======================
+2021-12-29 02:59:47,618 Channel (server worker num[20]):
+2021-12-29 02:59:47,619 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 02:59:47,620 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:00:17,650 ==================== TRACER ======================
+2021-12-29 03:00:17,651 Channel (server worker num[20]):
+2021-12-29 03:00:17,652 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:00:17,652 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:00:47,683 ==================== TRACER ======================
+2021-12-29 03:00:47,684 Channel (server worker num[20]):
+2021-12-29 03:00:47,684 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:00:47,685 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:01:17,715 ==================== TRACER ======================
+2021-12-29 03:01:17,716 Channel (server worker num[20]):
+2021-12-29 03:01:17,717 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:01:17,718 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:01:47,748 ==================== TRACER ======================
+2021-12-29 03:01:47,749 Channel (server worker num[20]):
+2021-12-29 03:01:47,750 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:01:47,750 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:02:17,781 ==================== TRACER ======================
+2021-12-29 03:02:17,782 Channel (server worker num[20]):
+2021-12-29 03:02:17,782 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:02:17,783 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:02:47,813 ==================== TRACER ======================
+2021-12-29 03:02:47,814 Channel (server worker num[20]):
+2021-12-29 03:02:47,815 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:02:47,816 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:03:17,846 ==================== TRACER ======================
+2021-12-29 03:03:17,847 Channel (server worker num[20]):
+2021-12-29 03:03:17,848 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:03:17,848 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:03:47,879 ==================== TRACER ======================
+2021-12-29 03:03:47,879 Channel (server worker num[20]):
+2021-12-29 03:03:47,880 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:03:47,881 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:04:17,911 ==================== TRACER ======================
+2021-12-29 03:04:17,912 Channel (server worker num[20]):
+2021-12-29 03:04:17,913 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:04:17,914 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:04:47,944 ==================== TRACER ======================
+2021-12-29 03:04:47,945 Channel (server worker num[20]):
+2021-12-29 03:04:47,946 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:04:47,947 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:05:17,965 ==================== TRACER ======================
+2021-12-29 03:05:17,966 Channel (server worker num[20]):
+2021-12-29 03:05:17,967 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:05:17,968 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:05:47,998 ==================== TRACER ======================
+2021-12-29 03:05:47,999 Channel (server worker num[20]):
+2021-12-29 03:05:48,000 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:05:48,001 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:06:18,031 ==================== TRACER ======================
+2021-12-29 03:06:18,032 Channel (server worker num[20]):
+2021-12-29 03:06:18,033 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:06:18,033 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:06:48,064 ==================== TRACER ======================
+2021-12-29 03:06:48,065 Channel (server worker num[20]):
+2021-12-29 03:06:48,065 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:06:48,066 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:07:14,621 ==================== TRACER ======================
+2021-12-29 03:07:14,623 Channel (server worker num[20]):
+2021-12-29 03:07:14,625 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:07:14,625 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:07:44,656 ==================== TRACER ======================
+2021-12-29 03:07:44,657 DAGExecutor:
+2021-12-29 03:07:44,657 Query count[1]
+2021-12-29 03:07:44,657 QPS[0.03333333333333333 q/s]
+2021-12-29 03:07:44,657 Succ[0.0]
+2021-12-29 03:07:44,658 Error req[0]
+2021-12-29 03:07:44,658 Latency:
+2021-12-29 03:07:44,658 ave[1819.424 ms]
+2021-12-29 03:07:44,658 .50[1819.424 ms]
+2021-12-29 03:07:44,658 .60[1819.424 ms]
+2021-12-29 03:07:44,659 .70[1819.424 ms]
+2021-12-29 03:07:44,659 .80[1819.424 ms]
+2021-12-29 03:07:44,659 .90[1819.424 ms]
+2021-12-29 03:07:44,659 .95[1819.424 ms]
+2021-12-29 03:07:44,659 .99[1819.424 ms]
+2021-12-29 03:07:44,659 Channel (server worker num[20]):
+2021-12-29 03:07:44,660 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:07:44,661 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:08:14,691 ==================== TRACER ======================
+2021-12-29 03:08:14,692 Channel (server worker num[20]):
+2021-12-29 03:08:14,693 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:08:14,694 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:08:44,724 ==================== TRACER ======================
+2021-12-29 03:08:44,725 Channel (server worker num[20]):
+2021-12-29 03:08:44,726 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:08:44,727 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:09:14,757 ==================== TRACER ======================
+2021-12-29 03:09:14,758 Channel (server worker num[20]):
+2021-12-29 03:09:14,759 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:09:14,760 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:09:44,776 ==================== TRACER ======================
+2021-12-29 03:09:44,777 Channel (server worker num[20]):
+2021-12-29 03:09:44,778 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:09:44,779 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:10:13,471 ==================== TRACER ======================
+2021-12-29 03:10:13,473 Channel (server worker num[20]):
+2021-12-29 03:10:13,475 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:10:13,476 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:10:43,506 ==================== TRACER ======================
+2021-12-29 03:10:43,508 DAGExecutor:
+2021-12-29 03:10:43,508 Query count[1]
+2021-12-29 03:10:43,508 QPS[0.03333333333333333 q/s]
+2021-12-29 03:10:43,508 Succ[0.0]
+2021-12-29 03:10:43,508 Error req[0]
+2021-12-29 03:10:43,509 Latency:
+2021-12-29 03:10:43,509 ave[1855.084 ms]
+2021-12-29 03:10:43,509 .50[1855.084 ms]
+2021-12-29 03:10:43,509 .60[1855.084 ms]
+2021-12-29 03:10:43,509 .70[1855.084 ms]
+2021-12-29 03:10:43,509 .80[1855.084 ms]
+2021-12-29 03:10:43,510 .90[1855.084 ms]
+2021-12-29 03:10:43,510 .95[1855.084 ms]
+2021-12-29 03:10:43,510 .99[1855.084 ms]
+2021-12-29 03:10:43,510 Channel (server worker num[20]):
+2021-12-29 03:10:43,511 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:10:43,512 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:11:13,542 ==================== TRACER ======================
+2021-12-29 03:11:13,543 Channel (server worker num[20]):
+2021-12-29 03:11:13,544 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:11:13,544 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:11:43,560 ==================== TRACER ======================
+2021-12-29 03:11:43,561 Channel (server worker num[20]):
+2021-12-29 03:11:43,562 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:11:43,562 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:11:47,437 ==================== TRACER ======================
+2021-12-29 03:11:47,438 Channel (server worker num[20]):
+2021-12-29 03:11:47,440 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:11:47,441 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:12:17,457 ==================== TRACER ======================
+2021-12-29 03:12:17,458 DAGExecutor:
+2021-12-29 03:12:17,459 Query count[1]
+2021-12-29 03:12:17,459 QPS[0.03333333333333333 q/s]
+2021-12-29 03:12:17,459 Succ[0.0]
+2021-12-29 03:12:17,459 Error req[0]
+2021-12-29 03:12:17,459 Latency:
+2021-12-29 03:12:17,460 ave[1822.881 ms]
+2021-12-29 03:12:17,460 .50[1822.881 ms]
+2021-12-29 03:12:17,460 .60[1822.881 ms]
+2021-12-29 03:12:17,460 .70[1822.881 ms]
+2021-12-29 03:12:17,460 .80[1822.881 ms]
+2021-12-29 03:12:17,461 .90[1822.881 ms]
+2021-12-29 03:12:17,461 .95[1822.881 ms]
+2021-12-29 03:12:17,461 .99[1822.881 ms]
+2021-12-29 03:12:17,461 Channel (server worker num[20]):
+2021-12-29 03:12:17,462 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:12:17,463 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:12:47,493 ==================== TRACER ======================
+2021-12-29 03:12:47,494 Channel (server worker num[20]):
+2021-12-29 03:12:47,495 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:12:47,495 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:13:17,502 ==================== TRACER ======================
+2021-12-29 03:13:17,502 Channel (server worker num[20]):
+2021-12-29 03:13:17,503 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:13:17,504 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:13:47,534 ==================== TRACER ======================
+2021-12-29 03:13:47,535 Channel (server worker num[20]):
+2021-12-29 03:13:47,536 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:13:47,537 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:14:17,567 ==================== TRACER ======================
+2021-12-29 03:14:17,568 Channel (server worker num[20]):
+2021-12-29 03:14:17,569 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:14:17,570 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:14:47,595 ==================== TRACER ======================
+2021-12-29 03:14:47,596 Channel (server worker num[20]):
+2021-12-29 03:14:47,597 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:14:47,598 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:15:17,628 ==================== TRACER ======================
+2021-12-29 03:15:17,629 Channel (server worker num[20]):
+2021-12-29 03:15:17,630 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:15:17,631 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:15:47,661 ==================== TRACER ======================
+2021-12-29 03:15:47,662 Channel (server worker num[20]):
+2021-12-29 03:15:47,663 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:15:47,663 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:16:17,694 ==================== TRACER ======================
+2021-12-29 03:16:17,695 Channel (server worker num[20]):
+2021-12-29 03:16:17,695 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:16:17,696 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:16:47,707 ==================== TRACER ======================
+2021-12-29 03:16:47,707 Channel (server worker num[20]):
+2021-12-29 03:16:47,708 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:16:47,709 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:17:17,739 ==================== TRACER ======================
+2021-12-29 03:17:17,740 Channel (server worker num[20]):
+2021-12-29 03:17:17,741 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:17:17,742 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:17:47,769 ==================== TRACER ======================
+2021-12-29 03:17:47,770 Channel (server worker num[20]):
+2021-12-29 03:17:47,770 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:17:47,771 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:18:17,801 ==================== TRACER ======================
+2021-12-29 03:18:17,802 Channel (server worker num[20]):
+2021-12-29 03:18:17,803 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:18:17,804 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:18:47,834 ==================== TRACER ======================
+2021-12-29 03:18:47,835 Channel (server worker num[20]):
+2021-12-29 03:18:47,836 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:18:47,837 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:19:17,846 ==================== TRACER ======================
+2021-12-29 03:19:17,847 Channel (server worker num[20]):
+2021-12-29 03:19:17,848 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:19:17,848 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:19:47,879 ==================== TRACER ======================
+2021-12-29 03:19:47,880 Channel (server worker num[20]):
+2021-12-29 03:19:47,881 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:19:47,881 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:20:17,912 ==================== TRACER ======================
+2021-12-29 03:20:17,912 Channel (server worker num[20]):
+2021-12-29 03:20:17,913 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:20:17,914 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:20:47,944 ==================== TRACER ======================
+2021-12-29 03:20:47,945 Channel (server worker num[20]):
+2021-12-29 03:20:47,946 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:20:47,947 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:21:17,977 ==================== TRACER ======================
+2021-12-29 03:21:17,978 Channel (server worker num[20]):
+2021-12-29 03:21:17,978 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:21:17,979 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:21:48,009 ==================== TRACER ======================
+2021-12-29 03:21:48,010 Channel (server worker num[20]):
+2021-12-29 03:21:48,011 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:21:48,012 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:22:18,042 ==================== TRACER ======================
+2021-12-29 03:22:18,043 Channel (server worker num[20]):
+2021-12-29 03:22:18,044 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:22:18,045 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:22:48,075 ==================== TRACER ======================
+2021-12-29 03:22:48,076 Channel (server worker num[20]):
+2021-12-29 03:22:48,076 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:22:48,077 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:23:18,107 ==================== TRACER ======================
+2021-12-29 03:23:18,108 Channel (server worker num[20]):
+2021-12-29 03:23:18,109 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:23:18,110 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:23:48,140 ==================== TRACER ======================
+2021-12-29 03:23:48,141 Channel (server worker num[20]):
+2021-12-29 03:23:48,142 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:23:48,143 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:24:18,173 ==================== TRACER ======================
+2021-12-29 03:24:18,174 Channel (server worker num[20]):
+2021-12-29 03:24:18,175 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:24:18,175 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:24:48,206 ==================== TRACER ======================
+2021-12-29 03:24:48,206 Channel (server worker num[20]):
+2021-12-29 03:24:48,207 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:24:48,208 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:25:18,238 ==================== TRACER ======================
+2021-12-29 03:25:18,239 Channel (server worker num[20]):
+2021-12-29 03:25:18,242 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:25:18,242 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:25:48,273 ==================== TRACER ======================
+2021-12-29 03:25:48,273 Channel (server worker num[20]):
+2021-12-29 03:25:48,274 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:25:48,275 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:26:18,305 ==================== TRACER ======================
+2021-12-29 03:26:18,306 Channel (server worker num[20]):
+2021-12-29 03:26:18,307 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:26:18,308 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:26:48,338 ==================== TRACER ======================
+2021-12-29 03:26:48,339 Channel (server worker num[20]):
+2021-12-29 03:26:48,340 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:26:48,340 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:27:18,371 ==================== TRACER ======================
+2021-12-29 03:27:18,371 Channel (server worker num[20]):
+2021-12-29 03:27:18,372 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:27:18,373 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:27:48,403 ==================== TRACER ======================
+2021-12-29 03:27:48,404 Channel (server worker num[20]):
+2021-12-29 03:27:48,405 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:27:48,406 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:28:18,436 ==================== TRACER ======================
+2021-12-29 03:28:18,437 Channel (server worker num[20]):
+2021-12-29 03:28:18,438 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:28:18,438 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:28:48,469 ==================== TRACER ======================
+2021-12-29 03:28:48,470 Channel (server worker num[20]):
+2021-12-29 03:28:48,470 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:28:48,471 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:29:18,501 ==================== TRACER ======================
+2021-12-29 03:29:18,502 Channel (server worker num[20]):
+2021-12-29 03:29:18,503 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:29:18,504 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:29:48,534 ==================== TRACER ======================
+2021-12-29 03:29:48,535 Channel (server worker num[20]):
+2021-12-29 03:29:48,536 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:29:48,536 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:30:18,567 ==================== TRACER ======================
+2021-12-29 03:30:18,568 Channel (server worker num[20]):
+2021-12-29 03:30:18,568 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:30:18,569 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:30:48,600 ==================== TRACER ======================
+2021-12-29 03:30:48,600 Channel (server worker num[20]):
+2021-12-29 03:30:48,601 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:30:48,602 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:31:18,632 ==================== TRACER ======================
+2021-12-29 03:31:18,633 Channel (server worker num[20]):
+2021-12-29 03:31:18,634 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:31:18,634 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:31:48,665 ==================== TRACER ======================
+2021-12-29 03:31:48,665 Channel (server worker num[20]):
+2021-12-29 03:31:48,666 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:31:48,667 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:32:18,697 ==================== TRACER ======================
+2021-12-29 03:32:18,698 Channel (server worker num[20]):
+2021-12-29 03:32:18,699 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:32:18,700 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:32:48,730 ==================== TRACER ======================
+2021-12-29 03:32:48,731 Channel (server worker num[20]):
+2021-12-29 03:32:48,731 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:32:48,732 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:33:18,762 ==================== TRACER ======================
+2021-12-29 03:33:18,763 Channel (server worker num[20]):
+2021-12-29 03:33:18,764 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:33:18,765 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:33:48,776 ==================== TRACER ======================
+2021-12-29 03:33:48,777 Channel (server worker num[20]):
+2021-12-29 03:33:48,778 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:33:48,779 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:34:18,809 ==================== TRACER ======================
+2021-12-29 03:34:18,810 Channel (server worker num[20]):
+2021-12-29 03:34:18,810 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:34:18,811 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:34:48,842 ==================== TRACER ======================
+2021-12-29 03:34:48,842 Channel (server worker num[20]):
+2021-12-29 03:34:48,843 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:34:48,844 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:35:18,874 ==================== TRACER ======================
+2021-12-29 03:35:18,875 Channel (server worker num[20]):
+2021-12-29 03:35:18,876 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:35:18,877 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:35:48,907 ==================== TRACER ======================
+2021-12-29 03:35:48,908 Channel (server worker num[20]):
+2021-12-29 03:35:48,909 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:35:48,909 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:36:18,940 ==================== TRACER ======================
+2021-12-29 03:36:18,941 Channel (server worker num[20]):
+2021-12-29 03:36:18,941 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:36:18,942 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:36:48,965 ==================== TRACER ======================
+2021-12-29 03:36:48,965 Channel (server worker num[20]):
+2021-12-29 03:36:48,966 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:36:48,967 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:37:18,997 ==================== TRACER ======================
+2021-12-29 03:37:18,998 Channel (server worker num[20]):
+2021-12-29 03:37:18,999 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:37:19,000 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:37:49,030 ==================== TRACER ======================
+2021-12-29 03:37:49,031 Channel (server worker num[20]):
+2021-12-29 03:37:49,031 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:37:49,032 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:38:19,035 ==================== TRACER ======================
+2021-12-29 03:38:19,036 Channel (server worker num[20]):
+2021-12-29 03:38:19,037 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:38:19,037 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:38:49,068 ==================== TRACER ======================
+2021-12-29 03:38:49,069 Channel (server worker num[20]):
+2021-12-29 03:38:49,069 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:38:49,070 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:39:19,100 ==================== TRACER ======================
+2021-12-29 03:39:19,101 Channel (server worker num[20]):
+2021-12-29 03:39:19,102 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:39:19,103 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:39:49,133 ==================== TRACER ======================
+2021-12-29 03:39:49,134 Channel (server worker num[20]):
+2021-12-29 03:39:49,135 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:39:49,135 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:40:19,166 ==================== TRACER ======================
+2021-12-29 03:40:19,166 Channel (server worker num[20]):
+2021-12-29 03:40:19,167 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:40:19,168 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:40:49,198 ==================== TRACER ======================
+2021-12-29 03:40:49,199 Channel (server worker num[20]):
+2021-12-29 03:40:49,200 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:40:49,201 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:41:19,231 ==================== TRACER ======================
+2021-12-29 03:41:19,232 Channel (server worker num[20]):
+2021-12-29 03:41:19,233 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:41:19,233 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:41:49,264 ==================== TRACER ======================
+2021-12-29 03:41:49,264 Channel (server worker num[20]):
+2021-12-29 03:41:49,265 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:41:49,266 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:42:19,296 ==================== TRACER ======================
+2021-12-29 03:42:19,297 Channel (server worker num[20]):
+2021-12-29 03:42:19,298 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:42:19,299 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:42:49,329 ==================== TRACER ======================
+2021-12-29 03:42:49,330 Channel (server worker num[20]):
+2021-12-29 03:42:49,331 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:42:49,332 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:43:19,362 ==================== TRACER ======================
+2021-12-29 03:43:19,363 Channel (server worker num[20]):
+2021-12-29 03:43:19,364 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:43:19,364 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:43:49,395 ==================== TRACER ======================
+2021-12-29 03:43:49,395 Channel (server worker num[20]):
+2021-12-29 03:43:49,396 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:43:49,397 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:44:19,427 ==================== TRACER ======================
+2021-12-29 03:44:19,428 Channel (server worker num[20]):
+2021-12-29 03:44:19,429 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:44:19,430 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:44:49,460 ==================== TRACER ======================
+2021-12-29 03:44:49,461 Channel (server worker num[20]):
+2021-12-29 03:44:49,462 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:44:49,462 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:45:19,467 ==================== TRACER ======================
+2021-12-29 03:45:19,468 Channel (server worker num[20]):
+2021-12-29 03:45:19,469 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:45:19,470 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:45:49,500 ==================== TRACER ======================
+2021-12-29 03:45:49,501 Channel (server worker num[20]):
+2021-12-29 03:45:49,502 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:45:49,502 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:46:19,533 ==================== TRACER ======================
+2021-12-29 03:46:19,533 Channel (server worker num[20]):
+2021-12-29 03:46:19,534 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:46:19,535 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:46:49,565 ==================== TRACER ======================
+2021-12-29 03:46:49,566 Channel (server worker num[20]):
+2021-12-29 03:46:49,567 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:46:49,567 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:47:19,589 ==================== TRACER ======================
+2021-12-29 03:47:19,590 Channel (server worker num[20]):
+2021-12-29 03:47:19,591 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:47:19,592 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:47:49,609 ==================== TRACER ======================
+2021-12-29 03:47:49,610 Channel (server worker num[20]):
+2021-12-29 03:47:49,610 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:47:49,611 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:48:19,641 ==================== TRACER ======================
+2021-12-29 03:48:19,642 Channel (server worker num[20]):
+2021-12-29 03:48:19,643 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:48:19,644 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:48:49,674 ==================== TRACER ======================
+2021-12-29 03:48:49,675 Channel (server worker num[20]):
+2021-12-29 03:48:49,676 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:48:49,677 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:49:19,707 ==================== TRACER ======================
+2021-12-29 03:49:19,708 Channel (server worker num[20]):
+2021-12-29 03:49:19,709 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:49:19,709 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:49:49,740 ==================== TRACER ======================
+2021-12-29 03:49:49,740 Channel (server worker num[20]):
+2021-12-29 03:49:49,741 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:49:49,742 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:50:19,772 ==================== TRACER ======================
+2021-12-29 03:50:19,773 Channel (server worker num[20]):
+2021-12-29 03:50:19,774 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:50:19,775 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:50:49,805 ==================== TRACER ======================
+2021-12-29 03:50:49,806 Channel (server worker num[20]):
+2021-12-29 03:50:49,806 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:50:49,807 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:51:19,837 ==================== TRACER ======================
+2021-12-29 03:51:19,838 Channel (server worker num[20]):
+2021-12-29 03:51:19,839 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:51:19,840 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:51:49,870 ==================== TRACER ======================
+2021-12-29 03:51:49,871 Channel (server worker num[20]):
+2021-12-29 03:51:49,872 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:51:49,872 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:52:19,880 ==================== TRACER ======================
+2021-12-29 03:52:19,881 Channel (server worker num[20]):
+2021-12-29 03:52:19,882 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:52:19,882 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:52:49,913 ==================== TRACER ======================
+2021-12-29 03:52:49,913 Channel (server worker num[20]):
+2021-12-29 03:52:49,914 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:52:49,915 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:53:19,945 ==================== TRACER ======================
+2021-12-29 03:53:19,946 Channel (server worker num[20]):
+2021-12-29 03:53:19,947 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:53:19,947 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:53:49,978 ==================== TRACER ======================
+2021-12-29 03:53:49,979 Channel (server worker num[20]):
+2021-12-29 03:53:49,980 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:53:49,980 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:54:20,011 ==================== TRACER ======================
+2021-12-29 03:54:20,011 Channel (server worker num[20]):
+2021-12-29 03:54:20,012 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:54:20,013 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:54:50,043 ==================== TRACER ======================
+2021-12-29 03:54:50,044 Channel (server worker num[20]):
+2021-12-29 03:54:50,045 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:54:50,046 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:55:20,076 ==================== TRACER ======================
+2021-12-29 03:55:20,077 Channel (server worker num[20]):
+2021-12-29 03:55:20,078 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:55:20,078 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:55:50,109 ==================== TRACER ======================
+2021-12-29 03:55:50,110 Channel (server worker num[20]):
+2021-12-29 03:55:50,110 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:55:50,111 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:56:20,141 ==================== TRACER ======================
+2021-12-29 03:56:20,142 Channel (server worker num[20]):
+2021-12-29 03:56:20,143 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:56:20,144 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 03:56:50,174 ==================== TRACER ======================
+2021-12-29 03:56:50,175 Channel (server worker num[20]):
+2021-12-29 03:56:50,175 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 03:56:50,176 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
[... identical TRACER blocks repeated every 30 s through 2021-12-29 05:36:26, all channels size[0/0] ...]
+2021-12-29 05:36:56,538 ==================== TRACER ======================
+2021-12-29 05:36:56,539 Channel (server worker num[20]):
+2021-12-29 05:36:56,540 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 05:36:56,540 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 05:37:04,999 ==================== TRACER ======================
+2021-12-29 05:37:05,001 Channel (server worker num[20]):
+2021-12-29 05:37:05,003 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 05:37:05,004 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 05:37:26,571 ==================== TRACER ======================
+2021-12-29 05:37:26,572 Channel (server worker num[20]):
+2021-12-29 05:37:26,573 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 05:37:26,573 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 05:37:35,034 ==================== TRACER ======================
+2021-12-29 05:37:35,035 DAGExecutor:
+2021-12-29 05:37:35,036 Query count[1]
+2021-12-29 05:37:35,036 QPS[0.03333333333333333 q/s]
+2021-12-29 05:37:35,036 Succ[0.0]
+2021-12-29 05:37:35,036 Error req[0]
+2021-12-29 05:37:35,037 Latency:
+2021-12-29 05:37:35,037 ave[1922.774 ms]
+2021-12-29 05:37:35,037 .50[1922.774 ms]
+2021-12-29 05:37:35,037 .60[1922.774 ms]
+2021-12-29 05:37:35,037 .70[1922.774 ms]
+2021-12-29 05:37:35,037 .80[1922.774 ms]
+2021-12-29 05:37:35,038 .90[1922.774 ms]
+2021-12-29 05:37:35,038 .95[1922.774 ms]
+2021-12-29 05:37:35,038 .99[1922.774 ms]
+2021-12-29 05:37:35,038 Channel (server worker num[20]):
+2021-12-29 05:37:35,039 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 05:37:35,040 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 05:40:41,953 ==================== TRACER ======================
+2021-12-29 05:40:41,954 DAGExecutor:
+2021-12-29 05:40:41,954 Query count[1]
+2021-12-29 05:40:41,954 QPS[0.03333333333333333 q/s]
+2021-12-29 05:40:41,955 Succ[0.0]
+2021-12-29 05:40:41,955 Error req[0]
+2021-12-29 05:40:41,955 Latency:
+2021-12-29 05:40:41,955 ave[1827.423 ms]
+2021-12-29 05:40:41,955 .50[1827.423 ms]
+2021-12-29 05:40:41,956 .60[1827.423 ms]
+2021-12-29 05:40:41,956 .70[1827.423 ms]
+2021-12-29 05:40:41,956 .80[1827.423 ms]
+2021-12-29 05:40:41,956 .90[1827.423 ms]
+2021-12-29 05:40:41,956 .95[1827.423 ms]
+2021-12-29 05:40:41,957 .99[1827.423 ms]
+2021-12-29 05:40:41,957 Channel (server worker num[20]):
+2021-12-29 05:40:41,957 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 05:40:41,958 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 05:42:41,691 ==================== TRACER ======================
+2021-12-29 05:42:41,692 DAGExecutor:
+2021-12-29 05:42:41,692 Query count[1]
+2021-12-29 05:42:41,693 QPS[0.03333333333333333 q/s]
+2021-12-29 05:42:41,693 Succ[0.0]
+2021-12-29 05:42:41,693 Error req[0]
+2021-12-29 05:42:41,693 Latency:
+2021-12-29 05:42:41,693 ave[1872.395 ms]
+2021-12-29 05:42:41,694 .50[1872.395 ms]
+2021-12-29 05:42:41,694 .60[1872.395 ms]
+2021-12-29 05:42:41,694 .70[1872.395 ms]
+2021-12-29 05:42:41,694 .80[1872.395 ms]
+2021-12-29 05:42:41,694 .90[1872.395 ms]
+2021-12-29 05:42:41,695 .95[1872.395 ms]
+2021-12-29 05:42:41,695 .99[1872.395 ms]
+2021-12-29 05:42:41,695 Channel (server worker num[20]):
+2021-12-29 05:42:41,696 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 05:42:41,697 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:09:24,499 ==================== TRACER ======================
+2021-12-29 06:09:24,500 Channel (server worker num[20]):
+2021-12-29 06:09:24,500 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[1/0]
+2021-12-29 06:09:24,501 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:09:28,483 ==================== TRACER ======================
+2021-12-29 06:09:28,484 Channel (server worker num[20]):
+2021-12-29 06:09:28,484 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:09:28,485 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:09:54,532 ==================== TRACER ======================
+2021-12-29 06:09:54,532 Channel (server worker num[20]):
+2021-12-29 06:09:54,533 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[1/0]
+2021-12-29 06:09:54,534 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:09:58,516 ==================== TRACER ======================
+2021-12-29 06:09:58,516 Channel (server worker num[20]):
+2021-12-29 06:09:58,517 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:09:58,518 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:10:19,913 ==================== TRACER ======================
+2021-12-29 06:10:19,914 Channel (server worker num[20]):
+2021-12-29 06:10:19,917 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:10:19,917 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:10:28,520 ==================== TRACER ======================
+2021-12-29 06:10:28,521 Channel (server worker num[20]):
+2021-12-29 06:10:28,522 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:10:28,522 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:10:49,948 ==================== TRACER ======================
+2021-12-29 06:10:49,949 Channel (server worker num[20]):
+2021-12-29 06:10:49,950 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[1/0]
+2021-12-29 06:10:49,950 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:10:58,553 ==================== TRACER ======================
+2021-12-29 06:10:58,554 Channel (server worker num[20]):
+2021-12-29 06:10:58,555 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:10:58,555 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:11:19,981 ==================== TRACER ======================
+2021-12-29 06:11:19,981 Channel (server worker num[20]):
+2021-12-29 06:11:19,982 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[1/0]
+2021-12-29 06:11:19,983 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:11:28,586 ==================== TRACER ======================
+2021-12-29 06:11:28,586 Channel (server worker num[20]):
+2021-12-29 06:11:28,587 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:11:28,588 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:11:49,985 ==================== TRACER ======================
+2021-12-29 06:11:49,986 Channel (server worker num[20]):
+2021-12-29 06:11:49,986 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[1/0]
+2021-12-29 06:11:49,987 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:11:58,608 ==================== TRACER ======================
+2021-12-29 06:11:58,608 Channel (server worker num[20]):
+2021-12-29 06:11:58,609 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:11:58,610 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:12:09,038 ==================== TRACER ======================
+2021-12-29 06:12:09,039 Channel (server worker num[20]):
+2021-12-29 06:12:09,041 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:12:09,042 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:12:28,640 ==================== TRACER ======================
+2021-12-29 06:12:28,641 Channel (server worker num[20]):
+2021-12-29 06:12:28,642 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:12:28,643 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:12:39,072 ==================== TRACER ======================
+2021-12-29 06:12:39,073 Channel (server worker num[20]):
+2021-12-29 06:12:39,074 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[1/0]
+2021-12-29 06:12:39,075 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:12:47,298 ==================== TRACER ======================
+2021-12-29 06:12:47,299 Channel (server worker num[20]):
+2021-12-29 06:12:47,302 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:12:47,302 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:12:58,673 ==================== TRACER ======================
+2021-12-29 06:12:58,674 Channel (server worker num[20]):
+2021-12-29 06:12:58,675 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:12:58,675 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:13:17,333 ==================== TRACER ======================
+2021-12-29 06:13:17,334 Channel (server worker num[20]):
+2021-12-29 06:13:17,334 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[1/0]
+2021-12-29 06:13:17,335 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:13:28,706 ==================== TRACER ======================
+2021-12-29 06:13:28,707 Channel (server worker num[20]):
+2021-12-29 06:13:28,707 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:13:28,708 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:13:47,365 ==================== TRACER ======================
+2021-12-29 06:13:47,366 Channel (server worker num[20]):
+2021-12-29 06:13:47,367 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[1/0]
+2021-12-29 06:13:47,368 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:13:58,739 ==================== TRACER ======================
+2021-12-29 06:13:58,739 Channel (server worker num[20]):
+2021-12-29 06:13:58,740 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:13:58,741 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:14:17,399 ==================== TRACER ======================
+2021-12-29 06:14:17,399 Channel (server worker num[20]):
+2021-12-29 06:14:17,400 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[1/0]
+2021-12-29 06:14:17,401 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:14:28,771 ==================== TRACER ======================
+2021-12-29 06:14:28,772 Channel (server worker num[20]):
+2021-12-29 06:14:28,773 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:14:28,774 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:14:47,409 ==================== TRACER ======================
+2021-12-29 06:14:47,410 Channel (server worker num[20]):
+2021-12-29 06:14:47,410 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[1/0]
+2021-12-29 06:14:47,411 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:27:29,474 ==================== TRACER ======================
+2021-12-29 06:27:29,475 Channel (server worker num[20]):
+2021-12-29 06:27:29,476 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:27:29,476 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:27:29,680 ==================== TRACER ======================
+2021-12-29 06:27:29,681 Channel (server worker num[20]):
+2021-12-29 06:27:29,682 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:27:29,683 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:27:59,507 ==================== TRACER ======================
+2021-12-29 06:27:59,507 Channel (server worker num[20]):
+2021-12-29 06:27:59,508 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:27:59,509 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:27:59,713 ==================== TRACER ======================
+2021-12-29 06:27:59,714 Channel (server worker num[20]):
+2021-12-29 06:27:59,715 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:27:59,716 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:28:29,539 ==================== TRACER ======================
+2021-12-29 06:28:29,540 Channel (server worker num[20]):
+2021-12-29 06:28:29,541 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:28:29,542 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:28:29,746 ==================== TRACER ======================
+2021-12-29 06:28:29,747 Channel (server worker num[20]):
+2021-12-29 06:28:29,748 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:28:29,748 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:28:59,572 ==================== TRACER ======================
+2021-12-29 06:28:59,573 Channel (server worker num[20]):
+2021-12-29 06:28:59,574 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:28:59,574 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:28:59,779 ==================== TRACER ======================
+2021-12-29 06:28:59,780 Channel (server worker num[20]):
+2021-12-29 06:28:59,780 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:28:59,781 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:29:29,605 ==================== TRACER ======================
+2021-12-29 06:29:29,606 Channel (server worker num[20]):
+2021-12-29 06:29:29,606 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:29:29,607 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:29:29,812 ==================== TRACER ======================
+2021-12-29 06:29:29,812 Channel (server worker num[20]):
+2021-12-29 06:29:29,813 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:29:29,814 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:29:59,638 ==================== TRACER ======================
+2021-12-29 06:29:59,638 Channel (server worker num[20]):
+2021-12-29 06:29:59,639 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:29:59,640 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:29:59,844 ==================== TRACER ======================
+2021-12-29 06:29:59,845 Channel (server worker num[20]):
+2021-12-29 06:29:59,846 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:29:59,847 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:30:29,670 ==================== TRACER ======================
+2021-12-29 06:30:29,671 Channel (server worker num[20]):
+2021-12-29 06:30:29,672 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:30:29,673 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:30:29,877 ==================== TRACER ======================
+2021-12-29 06:30:29,878 Channel (server worker num[20]):
+2021-12-29 06:30:29,879 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:30:29,880 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:30:59,703 ==================== TRACER ======================
+2021-12-29 06:30:59,704 Channel (server worker num[20]):
+2021-12-29 06:30:59,705 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:30:59,706 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:30:59,910 ==================== TRACER ======================
+2021-12-29 06:30:59,911 Channel (server worker num[20]):
+2021-12-29 06:30:59,912 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:30:59,912 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:31:29,736 ==================== TRACER ======================
+2021-12-29 06:31:29,737 Channel (server worker num[20]):
+2021-12-29 06:31:29,738 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:31:29,738 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:31:29,943 ==================== TRACER ======================
+2021-12-29 06:31:29,944 Channel (server worker num[20]):
+2021-12-29 06:31:29,945 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:31:29,945 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:31:43,566 ==================== TRACER ======================
+2021-12-29 06:31:43,568 Channel (server worker num[20]):
+2021-12-29 06:31:43,570 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:31:43,571 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:31:59,769 ==================== TRACER ======================
+2021-12-29 06:31:59,770 Channel (server worker num[20]):
+2021-12-29 06:31:59,770 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:31:59,771 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:32:13,591 ==================== TRACER ======================
+2021-12-29 06:32:13,592 Channel (server worker num[20]):
+2021-12-29 06:32:13,593 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:32:13,594 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:32:29,801 ==================== TRACER ======================
+2021-12-29 06:32:29,802 Channel (server worker num[20]):
+2021-12-29 06:32:29,803 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:32:29,804 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:32:31,132 ==================== TRACER ======================
+2021-12-29 06:32:31,134 Channel (server worker num[20]):
+2021-12-29 06:32:31,138 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:32:31,138 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:32:59,834 ==================== TRACER ======================
+2021-12-29 06:32:59,835 Channel (server worker num[20]):
+2021-12-29 06:32:59,836 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:32:59,837 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:33:01,169 ==================== TRACER ======================
+2021-12-29 06:33:01,170 Channel (server worker num[20]):
+2021-12-29 06:33:01,171 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:33:01,171 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:33:05,365 ==================== TRACER ======================
+2021-12-29 06:33:05,368 Channel (server worker num[20]):
+2021-12-29 06:33:05,370 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:33:05,370 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:33:29,848 ==================== TRACER ======================
+2021-12-29 06:33:29,849 Channel (server worker num[20]):
+2021-12-29 06:33:29,850 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:33:29,850 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:33:35,401 ==================== TRACER ======================
+2021-12-29 06:33:35,402 Channel (server worker num[20]):
+2021-12-29 06:33:35,402 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:33:35,403 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:33:59,858 ==================== TRACER ======================
+2021-12-29 06:33:59,859 Channel (server worker num[20]):
+2021-12-29 06:33:59,860 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:33:59,861 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:34:05,424 ==================== TRACER ======================
+2021-12-29 06:34:05,425 Channel (server worker num[20]):
+2021-12-29 06:34:05,428 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:34:05,429 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:34:29,891 ==================== TRACER ======================
+2021-12-29 06:34:29,892 Channel (server worker num[20]):
+2021-12-29 06:34:29,893 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:34:29,894 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:34:35,434 ==================== TRACER ======================
+2021-12-29 06:34:35,435 Channel (server worker num[20]):
+2021-12-29 06:34:35,436 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:34:35,437 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:34:59,924 ==================== TRACER ======================
+2021-12-29 06:34:59,925 Channel (server worker num[20]):
+2021-12-29 06:34:59,925 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:34:59,926 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:35:05,467 ==================== TRACER ======================
+2021-12-29 06:35:05,468 Channel (server worker num[20]):
+2021-12-29 06:35:05,469 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:35:05,470 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:35:29,937 ==================== TRACER ======================
+2021-12-29 06:35:29,938 Channel (server worker num[20]):
+2021-12-29 06:35:29,939 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:35:29,940 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:35:35,500 ==================== TRACER ======================
+2021-12-29 06:35:35,501 Channel (server worker num[20]):
+2021-12-29 06:35:35,502 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:35:35,503 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:35:59,970 ==================== TRACER ======================
+2021-12-29 06:35:59,971 Channel (server worker num[20]):
+2021-12-29 06:35:59,972 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:35:59,972 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:36:05,533 ==================== TRACER ======================
+2021-12-29 06:36:05,534 Channel (server worker num[20]):
+2021-12-29 06:36:05,535 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:36:05,536 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:36:30,003 ==================== TRACER ======================
+2021-12-29 06:36:30,004 Channel (server worker num[20]):
+2021-12-29 06:36:30,005 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:36:30,005 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:36:35,566 ==================== TRACER ======================
+2021-12-29 06:36:35,567 Channel (server worker num[20]):
+2021-12-29 06:36:35,568 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:36:35,569 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:37:00,036 ==================== TRACER ======================
+2021-12-29 06:37:00,036 Channel (server worker num[20]):
+2021-12-29 06:37:00,037 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:37:00,038 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:37:05,599 ==================== TRACER ======================
+2021-12-29 06:37:05,600 Channel (server worker num[20]):
+2021-12-29 06:37:05,601 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:37:05,602 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:37:30,068 ==================== TRACER ======================
+2021-12-29 06:37:30,069 Channel (server worker num[20]):
+2021-12-29 06:37:30,070 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:37:30,071 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:37:35,632 ==================== TRACER ======================
+2021-12-29 06:37:35,633 Channel (server worker num[20]):
+2021-12-29 06:37:35,634 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:37:35,635 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:38:00,100 ==================== TRACER ======================
+2021-12-29 06:38:00,101 Channel (server worker num[20]):
+2021-12-29 06:38:00,101 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:38:00,102 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:38:05,665 ==================== TRACER ======================
+2021-12-29 06:38:05,666 Channel (server worker num[20]):
+2021-12-29 06:38:05,667 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:38:05,668 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:38:30,128 ==================== TRACER ======================
+2021-12-29 06:38:30,129 Channel (server worker num[20]):
+2021-12-29 06:38:30,130 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:38:30,131 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:38:35,697 ==================== TRACER ======================
+2021-12-29 06:38:35,698 Channel (server worker num[20]):
+2021-12-29 06:38:35,699 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:38:35,700 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:39:00,161 ==================== TRACER ======================
+2021-12-29 06:39:00,162 Channel (server worker num[20]):
+2021-12-29 06:39:00,163 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:39:00,163 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:39:05,730 ==================== TRACER ======================
+2021-12-29 06:39:05,731 Channel (server worker num[20]):
+2021-12-29 06:39:05,734 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:39:05,734 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:39:30,194 ==================== TRACER ======================
+2021-12-29 06:39:30,195 Channel (server worker num[20]):
+2021-12-29 06:39:30,195 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:39:30,196 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:39:35,765 ==================== TRACER ======================
+2021-12-29 06:39:35,766 Channel (server worker num[20]):
+2021-12-29 06:39:35,767 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:39:35,767 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:40:00,226 ==================== TRACER ======================
+2021-12-29 06:40:00,227 Channel (server worker num[20]):
+2021-12-29 06:40:00,228 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:40:00,229 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:40:05,798 ==================== TRACER ======================
+2021-12-29 06:40:05,799 Channel (server worker num[20]):
+2021-12-29 06:40:05,799 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:40:05,800 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:40:19,364 ==================== TRACER ======================
+2021-12-29 06:40:19,365 Channel (server worker num[20]):
+2021-12-29 06:40:19,367 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:40:19,368 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:40:30,259 ==================== TRACER ======================
+2021-12-29 06:40:30,260 Channel (server worker num[20]):
+2021-12-29 06:40:30,261 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:40:30,261 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:40:49,398 ==================== TRACER ======================
+2021-12-29 06:40:49,399 DAGExecutor:
+2021-12-29 06:40:49,400 Query count[1]
+2021-12-29 06:40:49,400 QPS[0.03333333333333333 q/s]
+2021-12-29 06:40:49,400 Succ[0.0]
+2021-12-29 06:40:49,400 Error req[0]
+2021-12-29 06:40:49,401 Latency:
+2021-12-29 06:40:49,401 ave[50.848 ms]
+2021-12-29 06:40:49,401 .50[50.848 ms]
+2021-12-29 06:40:49,401 .60[50.848 ms]
+2021-12-29 06:40:49,401 .70[50.848 ms]
+2021-12-29 06:40:49,402 .80[50.848 ms]
+2021-12-29 06:40:49,402 .90[50.848 ms]
+2021-12-29 06:40:49,402 .95[50.848 ms]
+2021-12-29 06:40:49,402 .99[50.848 ms]
+2021-12-29 06:40:49,402 Channel (server worker num[20]):
+2021-12-29 06:40:49,403 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:40:49,404 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:42:33,522 ==================== TRACER ======================
+2021-12-29 06:42:33,523 DAGExecutor:
+2021-12-29 06:42:33,524 Query count[1]
+2021-12-29 06:42:33,524 QPS[0.03333333333333333 q/s]
+2021-12-29 06:42:33,524 Succ[0.0]
+2021-12-29 06:42:33,524 Error req[0]
+2021-12-29 06:42:33,525 Latency:
+2021-12-29 06:42:33,525 ave[44.165 ms]
+2021-12-29 06:42:33,525 .50[44.165 ms]
+2021-12-29 06:42:33,525 .60[44.165 ms]
+2021-12-29 06:42:33,525 .70[44.165 ms]
+2021-12-29 06:42:33,526 .80[44.165 ms]
+2021-12-29 06:42:33,526 .90[44.165 ms]
+2021-12-29 06:42:33,526 .95[44.165 ms]
+2021-12-29 06:42:33,526 .99[44.165 ms]
+2021-12-29 06:42:33,526 Channel (server worker num[20]):
+2021-12-29 06:42:33,527 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:42:33,528 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:43:14,300 ==================== TRACER ======================
+2021-12-29 06:43:14,301 DAGExecutor:
+2021-12-29 06:43:14,302 Query count[1]
+2021-12-29 06:43:14,302 QPS[0.03333333333333333 q/s]
+2021-12-29 06:43:14,302 Succ[0.0]
+2021-12-29 06:43:14,302 Error req[0]
+2021-12-29 06:43:14,302 Latency:
+2021-12-29 06:43:14,303 ave[47.774 ms]
+2021-12-29 06:43:14,303 .50[47.774 ms]
+2021-12-29 06:43:14,303 .60[47.774 ms]
+2021-12-29 06:43:14,303 .70[47.774 ms]
+2021-12-29 06:43:14,303 .80[47.774 ms]
+2021-12-29 06:43:14,303 .90[47.774 ms]
+2021-12-29 06:43:14,304 .95[47.774 ms]
+2021-12-29 06:43:14,304 .99[47.774 ms]
+2021-12-29 06:43:14,304 Channel (server worker num[20]):
+2021-12-29 06:43:14,305 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:43:14,306 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:45:04,370 ==================== TRACER ======================
+2021-12-29 06:45:04,371 DAGExecutor:
+2021-12-29 06:45:04,372 Query count[1]
+2021-12-29 06:45:04,372 QPS[0.03333333333333333 q/s]
+2021-12-29 06:45:04,372 Succ[0.0]
+2021-12-29 06:45:04,372 Error req[0]
+2021-12-29 06:45:04,373 Latency:
+2021-12-29 06:45:04,373 ave[52.253 ms]
+2021-12-29 06:45:04,373 .50[52.253 ms]
+2021-12-29 06:45:04,373 .60[52.253 ms]
+2021-12-29 06:45:04,373 .70[52.253 ms]
+2021-12-29 06:45:04,373 .80[52.253 ms]
+2021-12-29 06:45:04,374 .90[52.253 ms]
+2021-12-29 06:45:04,374 .95[52.253 ms]
+2021-12-29 06:45:04,374 .99[52.253 ms]
+2021-12-29 06:45:04,374 Channel (server worker num[20]):
+2021-12-29 06:45:04,375 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:45:04,376 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:46:49,186 ==================== TRACER ======================
+2021-12-29 06:46:49,187 DAGExecutor:
+2021-12-29 06:46:49,187 Query count[1]
+2021-12-29 06:46:49,188 QPS[0.03333333333333333 q/s]
+2021-12-29 06:46:49,188 Succ[0.0]
+2021-12-29 06:46:49,188 Error req[0]
+2021-12-29 06:46:49,188 Latency:
+2021-12-29 06:46:49,188 ave[98.181 ms]
+2021-12-29 06:46:49,189 .50[98.181 ms]
+2021-12-29 06:46:49,189 .60[98.181 ms]
+2021-12-29 06:46:49,189 .70[98.181 ms]
+2021-12-29 06:46:49,189 .80[98.181 ms]
+2021-12-29 06:46:49,189 .90[98.181 ms]
+2021-12-29 06:46:49,190 .95[98.181 ms]
+2021-12-29 06:46:49,190 .99[98.181 ms]
+2021-12-29 06:46:49,190 Channel (server worker num[20]):
+2021-12-29 06:46:49,191 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:46:49,191 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:51:31,212 ==================== TRACER ======================
+2021-12-29 06:51:31,213 DAGExecutor:
+2021-12-29 06:51:31,213 Query count[1]
+2021-12-29 06:51:31,214 QPS[0.03333333333333333 q/s]
+2021-12-29 06:51:31,214 Succ[0.0]
+2021-12-29 06:51:31,214 Error req[0]
+2021-12-29 06:51:31,214 Latency:
+2021-12-29 06:51:31,214 ave[1632.321 ms]
+2021-12-29 06:51:31,215 .50[1632.321 ms]
+2021-12-29 06:51:31,215 .60[1632.321 ms]
+2021-12-29 06:51:31,215 .70[1632.321 ms]
+2021-12-29 06:51:31,215 .80[1632.321 ms]
+2021-12-29 06:51:31,215 .90[1632.321 ms]
+2021-12-29 06:51:31,216 .95[1632.321 ms]
+2021-12-29 06:51:31,216 .99[1632.321 ms]
+2021-12-29 06:51:31,216 Channel (server worker num[20]):
+2021-12-29 06:51:31,217 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:51:31,217 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:52:44,008 ==================== TRACER ======================
+2021-12-29 06:52:44,009 DAGExecutor:
+2021-12-29 06:52:44,009 Query count[1]
+2021-12-29 06:52:44,009 QPS[0.03333333333333333 q/s]
+2021-12-29 06:52:44,009 Succ[0.0]
+2021-12-29 06:52:44,010 Error req[0]
+2021-12-29 06:52:44,010 Latency:
+2021-12-29 06:52:44,010 ave[1637.304 ms]
+2021-12-29 06:52:44,010 .50[1637.304 ms]
+2021-12-29 06:52:44,010 .60[1637.304 ms]
+2021-12-29 06:52:44,011 .70[1637.304 ms]
+2021-12-29 06:52:44,011 .80[1637.304 ms]
+2021-12-29 06:52:44,011 .90[1637.304 ms]
+2021-12-29 06:52:44,011 .95[1637.304 ms]
+2021-12-29 06:52:44,011 .99[1637.304 ms]
+2021-12-29 06:52:44,012 Channel (server worker num[20]):
+2021-12-29 06:52:44,012 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:52:44,013 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 06:54:30,768 ==================== TRACER ======================
+2021-12-29 06:54:30,769 DAGExecutor:
+2021-12-29 06:54:30,769 Query count[1]
+2021-12-29 06:54:30,769 QPS[0.03333333333333333 q/s]
+2021-12-29 06:54:30,770 Succ[0.0]
+2021-12-29 06:54:30,770 Error req[0]
+2021-12-29 06:54:30,770 Latency:
+2021-12-29 06:54:30,770 ave[1731.024 ms]
+2021-12-29 06:54:30,770 .50[1731.024 ms]
+2021-12-29 06:54:30,770 .60[1731.024 ms]
+2021-12-29 06:54:30,771 .70[1731.024 ms]
+2021-12-29 06:54:30,771 .80[1731.024 ms]
+2021-12-29 06:54:30,771 .90[1731.024 ms]
+2021-12-29 06:54:30,771 .95[1731.024 ms]
+2021-12-29 06:54:30,771 .99[1731.024 ms]
+2021-12-29 06:54:30,772 Channel (server worker num[20]):
+2021-12-29 06:54:30,772 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 06:54:30,773 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:09:31,868 ==================== TRACER ======================
+2021-12-29 07:09:31,869 Channel (server worker num[20]):
+2021-12-29 07:09:31,870 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:09:31,871 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:10:01,742 ==================== TRACER ======================
+2021-12-29 07:10:01,742 Channel (server worker num[20]):
+2021-12-29 07:10:01,745 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:10:01,746 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:10:01,888 ==================== TRACER ======================
+2021-12-29 07:10:01,888 Channel (server worker num[20]):
+2021-12-29 07:10:01,889 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:10:01,890 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:10:31,776 ==================== TRACER ======================
+2021-12-29 07:10:31,777 Channel (server worker num[20]):
+2021-12-29 07:10:31,778 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:10:31,778 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:10:31,920 ==================== TRACER ======================
+2021-12-29 07:10:31,921 Channel (server worker num[20]):
+2021-12-29 07:10:31,922 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:10:31,922 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:11:01,809 ==================== TRACER ======================
+2021-12-29 07:11:01,810 Channel (server worker num[20]):
+2021-12-29 07:11:01,810 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:11:01,811 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:11:01,953 ==================== TRACER ======================
+2021-12-29 07:11:01,953 Channel (server worker num[20]):
+2021-12-29 07:11:01,954 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:11:01,955 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:11:31,841 ==================== TRACER ======================
+2021-12-29 07:11:31,842 Channel (server worker num[20]):
+2021-12-29 07:11:31,843 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:11:31,844 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:11:31,985 ==================== TRACER ======================
+2021-12-29 07:11:31,986 Channel (server worker num[20]):
+2021-12-29 07:11:31,987 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:11:31,988 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:12:01,874 ==================== TRACER ======================
+2021-12-29 07:12:01,875 Channel (server worker num[20]):
+2021-12-29 07:12:01,876 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:12:01,876 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:12:02,018 ==================== TRACER ======================
+2021-12-29 07:12:02,019 Channel (server worker num[20]):
+2021-12-29 07:12:02,019 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:12:02,020 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:12:31,885 ==================== TRACER ======================
+2021-12-29 07:12:31,886 Channel (server worker num[20]):
+2021-12-29 07:12:31,887 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:12:31,888 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:12:32,035 ==================== TRACER ======================
+2021-12-29 07:12:32,036 Channel (server worker num[20]):
+2021-12-29 07:12:32,037 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:12:32,038 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:13:01,918 ==================== TRACER ======================
+2021-12-29 07:13:01,919 Channel (server worker num[20]):
+2021-12-29 07:13:01,920 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:13:01,920 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:13:02,068 ==================== TRACER ======================
+2021-12-29 07:13:02,069 Channel (server worker num[20]):
+2021-12-29 07:13:02,069 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:13:02,070 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:13:12,292 ==================== TRACER ======================
+2021-12-29 07:13:12,294 Channel (server worker num[20]):
+2021-12-29 07:13:12,296 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:13:12,297 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:13:32,101 ==================== TRACER ======================
+2021-12-29 07:13:32,101 Channel (server worker num[20]):
+2021-12-29 07:13:32,102 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:13:32,103 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:13:42,327 ==================== TRACER ======================
+2021-12-29 07:13:42,329 DAGExecutor:
+2021-12-29 07:13:42,329 Query count[1]
+2021-12-29 07:13:42,329 QPS[0.03333333333333333 q/s]
+2021-12-29 07:13:42,329 Succ[0.0]
+2021-12-29 07:13:42,329 Error req[0]
+2021-12-29 07:13:42,330 Latency:
+2021-12-29 07:13:42,330 ave[1661.686 ms]
+2021-12-29 07:13:42,330 .50[1661.686 ms]
+2021-12-29 07:13:42,330 .60[1661.686 ms]
+2021-12-29 07:13:42,330 .70[1661.686 ms]
+2021-12-29 07:13:42,331 .80[1661.686 ms]
+2021-12-29 07:13:42,331 .90[1661.686 ms]
+2021-12-29 07:13:42,331 .95[1661.686 ms]
+2021-12-29 07:13:42,331 .99[1661.686 ms]
+2021-12-29 07:13:42,331 Channel (server worker num[20]):
+2021-12-29 07:13:42,332 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:13:42,333 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:14:02,133 ==================== TRACER ======================
+2021-12-29 07:14:02,134 Channel (server worker num[20]):
+2021-12-29 07:14:02,135 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:14:02,136 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:14:12,363 ==================== TRACER ======================
+2021-12-29 07:14:12,364 Channel (server worker num[20]):
+2021-12-29 07:14:12,365 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:14:12,366 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:14:32,166 ==================== TRACER ======================
+2021-12-29 07:14:32,167 Channel (server worker num[20]):
+2021-12-29 07:14:32,168 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:14:32,168 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:14:42,396 ==================== TRACER ======================
+2021-12-29 07:14:42,397 Channel (server worker num[20]):
+2021-12-29 07:14:42,398 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:14:42,398 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:15:02,199 ==================== TRACER ======================
+2021-12-29 07:15:02,200 Channel (server worker num[20]):
+2021-12-29 07:15:02,200 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:15:02,201 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:15:12,429 ==================== TRACER ======================
+2021-12-29 07:15:12,430 Channel (server worker num[20]):
+2021-12-29 07:15:12,430 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:15:12,431 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:15:32,223 ==================== TRACER ======================
+2021-12-29 07:15:32,224 Channel (server worker num[20]):
+2021-12-29 07:15:32,225 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:15:32,226 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:15:42,461 ==================== TRACER ======================
+2021-12-29 07:15:42,462 Channel (server worker num[20]):
+2021-12-29 07:15:42,463 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:15:42,464 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:16:02,228 ==================== TRACER ======================
+2021-12-29 07:16:02,229 Channel (server worker num[20]):
+2021-12-29 07:16:02,230 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:16:02,230 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:16:12,494 ==================== TRACER ======================
+2021-12-29 07:16:12,495 Channel (server worker num[20]):
+2021-12-29 07:16:12,496 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:16:12,496 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:16:27,943 ==================== TRACER ======================
+2021-12-29 07:16:27,945 Channel (server worker num[20]):
+2021-12-29 07:16:27,947 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:16:27,948 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:16:32,261 ==================== TRACER ======================
+2021-12-29 07:16:32,261 Channel (server worker num[20]):
+2021-12-29 07:16:32,262 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:16:32,263 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:16:57,960 ==================== TRACER ======================
+2021-12-29 07:16:57,961 DAGExecutor:
+2021-12-29 07:16:57,962 Query count[1]
+2021-12-29 07:16:57,962 QPS[0.03333333333333333 q/s]
+2021-12-29 07:16:57,962 Succ[0.0]
+2021-12-29 07:16:57,962 Error req[0]
+2021-12-29 07:16:57,962 Latency:
+2021-12-29 07:16:57,962 ave[76.653 ms]
+2021-12-29 07:16:57,963 .50[76.653 ms]
+2021-12-29 07:16:57,963 .60[76.653 ms]
+2021-12-29 07:16:57,963 .70[76.653 ms]
+2021-12-29 07:16:57,963 .80[76.653 ms]
+2021-12-29 07:16:57,963 .90[76.653 ms]
+2021-12-29 07:16:57,963 .95[76.653 ms]
+2021-12-29 07:16:57,964 .99[76.653 ms]
+2021-12-29 07:16:57,964 Channel (server worker num[20]):
+2021-12-29 07:16:57,965 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:16:57,965 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:17:02,293 ==================== TRACER ======================
+2021-12-29 07:17:02,294 Channel (server worker num[20]):
+2021-12-29 07:17:02,295 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:17:02,295 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:17:27,996 ==================== TRACER ======================
+2021-12-29 07:17:27,997 Channel (server worker num[20]):
+2021-12-29 07:17:27,997 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:17:27,998 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:17:32,326 ==================== TRACER ======================
+2021-12-29 07:17:32,327 Channel (server worker num[20]):
+2021-12-29 07:17:32,327 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:17:32,329 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:17:33,622 ==================== TRACER ======================
+2021-12-29 07:17:33,624 Channel (server worker num[20]):
+2021-12-29 07:17:33,626 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:17:33,626 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:18:02,359 ==================== TRACER ======================
+2021-12-29 07:18:02,360 Channel (server worker num[20]):
+2021-12-29 07:18:02,361 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:18:02,362 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:18:03,656 ==================== TRACER ======================
+2021-12-29 07:18:03,658 DAGExecutor:
+2021-12-29 07:18:03,658 Query count[1]
+2021-12-29 07:18:03,658 QPS[0.03333333333333333 q/s]
+2021-12-29 07:18:03,658 Succ[0.0]
+2021-12-29 07:18:03,658 Error req[0]
+2021-12-29 07:18:03,659 Latency:
+2021-12-29 07:18:03,659 ave[1646.997 ms]
+2021-12-29 07:18:03,659 .50[1646.997 ms]
+2021-12-29 07:18:03,659 .60[1646.997 ms]
+2021-12-29 07:18:03,659 .70[1646.997 ms]
+2021-12-29 07:18:03,659 .80[1646.997 ms]
+2021-12-29 07:18:03,660 .90[1646.997 ms]
+2021-12-29 07:18:03,660 .95[1646.997 ms]
+2021-12-29 07:18:03,660 .99[1646.997 ms]
+2021-12-29 07:18:03,660 Channel (server worker num[20]):
+2021-12-29 07:18:03,661 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:18:03,662 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:18:32,392 ==================== TRACER ======================
+2021-12-29 07:18:32,393 Channel (server worker num[20]):
+2021-12-29 07:18:32,394 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:18:32,395 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:18:33,692 ==================== TRACER ======================
+2021-12-29 07:18:33,693 Channel (server worker num[20]):
+2021-12-29 07:18:33,694 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:18:33,694 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:19:02,425 ==================== TRACER ======================
+2021-12-29 07:19:02,426 Channel (server worker num[20]):
+2021-12-29 07:19:02,427 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:19:02,427 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:19:03,718 ==================== TRACER ======================
+2021-12-29 07:19:03,718 Channel (server worker num[20]):
+2021-12-29 07:19:03,719 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:19:03,720 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:19:06,252 ==================== TRACER ======================
+2021-12-29 07:19:06,254 Channel (server worker num[20]):
+2021-12-29 07:19:06,256 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:19:06,256 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:19:32,457 ==================== TRACER ======================
+2021-12-29 07:19:32,458 Channel (server worker num[20]):
+2021-12-29 07:19:32,459 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:19:32,459 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:19:36,278 ==================== TRACER ======================
+2021-12-29 07:19:36,279 DAGExecutor:
+2021-12-29 07:19:36,279 Query count[1]
+2021-12-29 07:19:36,279 QPS[0.03333333333333333 q/s]
+2021-12-29 07:19:36,280 Succ[0.0]
+2021-12-29 07:19:36,280 Error req[0]
+2021-12-29 07:19:36,280 Latency:
+2021-12-29 07:19:36,280 ave[1630.707 ms]
+2021-12-29 07:19:36,280 .50[1630.707 ms]
+2021-12-29 07:19:36,280 .60[1630.707 ms]
+2021-12-29 07:19:36,281 .70[1630.707 ms]
+2021-12-29 07:19:36,281 .80[1630.707 ms]
+2021-12-29 07:19:36,281 .90[1630.707 ms]
+2021-12-29 07:19:36,281 .95[1630.707 ms]
+2021-12-29 07:19:36,281 .99[1630.707 ms]
+2021-12-29 07:19:36,282 Channel (server worker num[20]):
+2021-12-29 07:19:36,282 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:19:36,283 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:20:02,490 ==================== TRACER ======================
+2021-12-29 07:20:02,490 Channel (server worker num[20]):
+2021-12-29 07:20:02,491 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:20:02,492 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:20:06,313 ==================== TRACER ======================
+2021-12-29 07:20:06,314 Channel (server worker num[20]):
+2021-12-29 07:20:06,315 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:20:06,316 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:20:32,520 ==================== TRACER ======================
+2021-12-29 07:20:32,521 Channel (server worker num[20]):
+2021-12-29 07:20:32,522 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:20:32,523 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:20:36,346 ==================== TRACER ======================
+2021-12-29 07:20:36,347 Channel (server worker num[20]):
+2021-12-29 07:20:36,348 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:20:36,349 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:21:02,553 ==================== TRACER ======================
+2021-12-29 07:21:02,554 Channel (server worker num[20]):
+2021-12-29 07:21:02,555 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:21:02,555 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:21:06,379 ==================== TRACER ======================
+2021-12-29 07:21:06,380 Channel (server worker num[20]):
+2021-12-29 07:21:06,381 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:21:06,381 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:21:32,586 ==================== TRACER ======================
+2021-12-29 07:21:32,587 Channel (server worker num[20]):
+2021-12-29 07:21:32,587 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:21:32,588 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:21:36,412 ==================== TRACER ======================
+2021-12-29 07:21:36,413 Channel (server worker num[20]):
+2021-12-29 07:21:36,413 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:21:36,414 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:22:02,618 ==================== TRACER ======================
+2021-12-29 07:22:02,619 Channel (server worker num[20]):
+2021-12-29 07:22:02,620 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:22:02,621 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:22:06,445 ==================== TRACER ======================
+2021-12-29 07:22:06,445 Channel (server worker num[20]):
+2021-12-29 07:22:06,446 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:22:06,447 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:22:32,651 ==================== TRACER ======================
+2021-12-29 07:22:32,652 Channel (server worker num[20]):
+2021-12-29 07:22:32,653 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:22:32,654 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:22:36,477 ==================== TRACER ======================
+2021-12-29 07:22:36,478 Channel (server worker num[20]):
+2021-12-29 07:22:36,479 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:22:36,480 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:23:02,664 ==================== TRACER ======================
+2021-12-29 07:23:02,665 Channel (server worker num[20]):
+2021-12-29 07:23:02,666 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:23:02,666 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:23:06,510 ==================== TRACER ======================
+2021-12-29 07:23:06,511 Channel (server worker num[20]):
+2021-12-29 07:23:06,512 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:23:06,513 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:23:32,680 ==================== TRACER ======================
+2021-12-29 07:23:32,681 Channel (server worker num[20]):
+2021-12-29 07:23:32,682 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:23:32,683 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:23:36,543 ==================== TRACER ======================
+2021-12-29 07:23:36,544 Channel (server worker num[20]):
+2021-12-29 07:23:36,545 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:23:36,545 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:24:02,713 ==================== TRACER ======================
+2021-12-29 07:24:02,714 Channel (server worker num[20]):
+2021-12-29 07:24:02,715 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:24:02,715 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:24:06,576 ==================== TRACER ======================
+2021-12-29 07:24:06,577 Channel (server worker num[20]):
+2021-12-29 07:24:06,577 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:24:06,578 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:24:32,746 ==================== TRACER ======================
+2021-12-29 07:24:32,746 Channel (server worker num[20]):
+2021-12-29 07:24:32,747 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:24:32,748 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:24:36,609 ==================== TRACER ======================
+2021-12-29 07:24:36,609 Channel (server worker num[20]):
+2021-12-29 07:24:36,610 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:24:36,611 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:25:02,778 ==================== TRACER ======================
+2021-12-29 07:25:02,779 Channel (server worker num[20]):
+2021-12-29 07:25:02,780 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:25:02,781 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:25:06,641 ==================== TRACER ======================
+2021-12-29 07:25:06,642 Channel (server worker num[20]):
+2021-12-29 07:25:06,643 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:25:06,644 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:25:32,796 ==================== TRACER ======================
+2021-12-29 07:25:32,797 Channel (server worker num[20]):
+2021-12-29 07:25:32,798 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:25:32,798 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:25:36,674 ==================== TRACER ======================
+2021-12-29 07:25:36,675 Channel (server worker num[20]):
+2021-12-29 07:25:36,676 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:25:36,677 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:26:02,829 ==================== TRACER ======================
+2021-12-29 07:26:02,830 Channel (server worker num[20]):
+2021-12-29 07:26:02,830 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:26:02,831 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:26:06,707 ==================== TRACER ======================
+2021-12-29 07:26:06,708 Channel (server worker num[20]):
+2021-12-29 07:26:06,708 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:26:06,709 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:26:32,861 ==================== TRACER ======================
+2021-12-29 07:26:32,862 Channel (server worker num[20]):
+2021-12-29 07:26:32,863 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:26:32,864 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:26:36,740 ==================== TRACER ======================
+2021-12-29 07:26:36,740 Channel (server worker num[20]):
+2021-12-29 07:26:36,741 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:26:36,742 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:27:02,894 ==================== TRACER ======================
+2021-12-29 07:27:02,895 Channel (server worker num[20]):
+2021-12-29 07:27:02,896 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:27:02,896 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:27:06,772 ==================== TRACER ======================
+2021-12-29 07:27:06,773 Channel (server worker num[20]):
+2021-12-29 07:27:06,774 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:27:06,775 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:27:32,927 ==================== TRACER ======================
+2021-12-29 07:27:32,928 Channel (server worker num[20]):
+2021-12-29 07:27:32,928 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:27:32,929 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:27:36,805 ==================== TRACER ======================
+2021-12-29 07:27:36,806 Channel (server worker num[20]):
+2021-12-29 07:27:36,807 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:27:36,807 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:28:02,959 ==================== TRACER ======================
+2021-12-29 07:28:02,960 Channel (server worker num[20]):
+2021-12-29 07:28:02,961 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:28:02,962 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:28:06,838 ==================== TRACER ======================
+2021-12-29 07:28:06,839 Channel (server worker num[20]):
+2021-12-29 07:28:06,839 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:28:06,840 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:28:32,992 ==================== TRACER ======================
+2021-12-29 07:28:32,993 Channel (server worker num[20]):
+2021-12-29 07:28:32,993 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:28:32,994 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:28:36,871 ==================== TRACER ======================
+2021-12-29 07:28:36,871 Channel (server worker num[20]):
+2021-12-29 07:28:36,872 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:28:36,873 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:29:03,024 ==================== TRACER ======================
+2021-12-29 07:29:03,025 Channel (server worker num[20]):
+2021-12-29 07:29:03,026 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:29:03,027 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:29:06,903 ==================== TRACER ======================
+2021-12-29 07:29:06,904 Channel (server worker num[20]):
+2021-12-29 07:29:06,905 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:29:06,906 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:29:33,057 ==================== TRACER ======================
+2021-12-29 07:29:33,058 Channel (server worker num[20]):
+2021-12-29 07:29:33,059 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:29:33,060 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:29:36,912 ==================== TRACER ======================
+2021-12-29 07:29:36,913 Channel (server worker num[20]):
+2021-12-29 07:29:36,914 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:29:36,914 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:30:03,090 ==================== TRACER ======================
+2021-12-29 07:30:03,091 Channel (server worker num[20]):
+2021-12-29 07:30:03,092 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:30:03,092 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:30:06,945 ==================== TRACER ======================
+2021-12-29 07:30:06,946 Channel (server worker num[20]):
+2021-12-29 07:30:06,946 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:30:06,947 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:30:33,123 ==================== TRACER ======================
+2021-12-29 07:30:33,123 Channel (server worker num[20]):
+2021-12-29 07:30:33,124 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:30:33,125 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:30:36,974 ==================== TRACER ======================
+2021-12-29 07:30:36,975 Channel (server worker num[20]):
+2021-12-29 07:30:36,976 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:30:36,977 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:31:03,155 ==================== TRACER ======================
+2021-12-29 07:31:03,156 Channel (server worker num[20]):
+2021-12-29 07:31:03,157 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:31:03,158 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:31:07,007 ==================== TRACER ======================
+2021-12-29 07:31:07,008 Channel (server worker num[20]):
+2021-12-29 07:31:07,009 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:31:07,009 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:31:33,188 ==================== TRACER ======================
+2021-12-29 07:31:33,189 Channel (server worker num[20]):
+2021-12-29 07:31:33,190 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:31:33,190 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:31:37,029 ==================== TRACER ======================
+2021-12-29 07:31:37,029 Channel (server worker num[20]):
+2021-12-29 07:31:37,030 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:31:37,031 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:32:03,208 ==================== TRACER ======================
+2021-12-29 07:32:03,208 Channel (server worker num[20]):
+2021-12-29 07:32:03,209 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:32:03,210 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:32:07,061 ==================== TRACER ======================
+2021-12-29 07:32:07,062 Channel (server worker num[20]):
+2021-12-29 07:32:07,063 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:32:07,064 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:32:33,217 ==================== TRACER ======================
+2021-12-29 07:32:33,218 Channel (server worker num[20]):
+2021-12-29 07:32:33,219 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:32:33,219 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:32:37,094 ==================== TRACER ======================
+2021-12-29 07:32:37,095 Channel (server worker num[20]):
+2021-12-29 07:32:37,095 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:32:37,096 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:33:03,250 ==================== TRACER ======================
+2021-12-29 07:33:03,251 Channel (server worker num[20]):
+2021-12-29 07:33:03,251 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:33:03,252 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:33:07,116 ==================== TRACER ======================
+2021-12-29 07:33:07,117 Channel (server worker num[20]):
+2021-12-29 07:33:07,118 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:33:07,119 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:33:33,282 ==================== TRACER ======================
+2021-12-29 07:33:33,283 Channel (server worker num[20]):
+2021-12-29 07:33:33,284 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:33:33,285 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:33:37,119 ==================== TRACER ======================
+2021-12-29 07:33:37,120 Channel (server worker num[20]):
+2021-12-29 07:33:37,121 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:33:37,122 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:34:03,312 ==================== TRACER ======================
+2021-12-29 07:34:03,313 Channel (server worker num[20]):
+2021-12-29 07:34:03,314 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:34:03,314 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:34:07,152 ==================== TRACER ======================
+2021-12-29 07:34:07,153 Channel (server worker num[20]):
+2021-12-29 07:34:07,154 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:34:07,154 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:34:33,345 ==================== TRACER ======================
+2021-12-29 07:34:33,346 Channel (server worker num[20]):
+2021-12-29 07:34:33,346 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:34:33,347 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:34:37,185 ==================== TRACER ======================
+2021-12-29 07:34:37,186 Channel (server worker num[20]):
+2021-12-29 07:34:37,186 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:34:37,187 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:35:03,366 ==================== TRACER ======================
+2021-12-29 07:35:03,367 Channel (server worker num[20]):
+2021-12-29 07:35:03,368 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:35:03,369 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:35:07,218 ==================== TRACER ======================
+2021-12-29 07:35:07,218 Channel (server worker num[20]):
+2021-12-29 07:35:07,221 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:35:07,222 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:35:33,378 ==================== TRACER ======================
+2021-12-29 07:35:33,379 Channel (server worker num[20]):
+2021-12-29 07:35:33,380 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:35:33,381 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:35:37,252 ==================== TRACER ======================
+2021-12-29 07:35:37,253 Channel (server worker num[20]):
+2021-12-29 07:35:37,254 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:35:37,255 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:36:03,411 ==================== TRACER ======================
+2021-12-29 07:36:03,412 Channel (server worker num[20]):
+2021-12-29 07:36:03,413 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:36:03,413 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:36:07,257 ==================== TRACER ======================
+2021-12-29 07:36:07,257 Channel (server worker num[20]):
+2021-12-29 07:36:07,260 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:36:07,261 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:36:33,444 ==================== TRACER ======================
+2021-12-29 07:36:33,445 Channel (server worker num[20]):
+2021-12-29 07:36:33,445 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:36:33,446 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:36:37,291 ==================== TRACER ======================
+2021-12-29 07:36:37,292 Channel (server worker num[20]):
+2021-12-29 07:36:37,293 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:36:37,294 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:37:03,477 ==================== TRACER ======================
+2021-12-29 07:37:03,477 Channel (server worker num[20]):
+2021-12-29 07:37:03,478 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:37:03,479 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:37:07,324 ==================== TRACER ======================
+2021-12-29 07:37:07,325 Channel (server worker num[20]):
+2021-12-29 07:37:07,326 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:37:07,326 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:37:33,509 ==================== TRACER ======================
+2021-12-29 07:37:33,510 Channel (server worker num[20]):
+2021-12-29 07:37:33,511 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:37:33,512 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:37:37,357 ==================== TRACER ======================
+2021-12-29 07:37:37,358 Channel (server worker num[20]):
+2021-12-29 07:37:37,358 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:37:37,359 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:38:03,542 ==================== TRACER ======================
+2021-12-29 07:38:03,543 Channel (server worker num[20]):
+2021-12-29 07:38:03,544 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:38:03,545 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:38:07,365 ==================== TRACER ======================
+2021-12-29 07:38:07,365 Channel (server worker num[20]):
+2021-12-29 07:38:07,366 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:38:07,367 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:38:33,565 ==================== TRACER ======================
+2021-12-29 07:38:33,566 Channel (server worker num[20]):
+2021-12-29 07:38:33,567 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:38:33,568 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:38:37,397 ==================== TRACER ======================
+2021-12-29 07:38:37,398 Channel (server worker num[20]):
+2021-12-29 07:38:37,399 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:38:37,400 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:39:03,592 ==================== TRACER ======================
+2021-12-29 07:39:03,593 Channel (server worker num[20]):
+2021-12-29 07:39:03,594 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:39:03,595 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:39:07,430 ==================== TRACER ======================
+2021-12-29 07:39:07,431 Channel (server worker num[20]):
+2021-12-29 07:39:07,432 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:39:07,432 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:39:33,625 ==================== TRACER ======================
+2021-12-29 07:39:33,626 Channel (server worker num[20]):
+2021-12-29 07:39:33,627 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:39:33,628 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:39:37,463 ==================== TRACER ======================
+2021-12-29 07:39:37,464 Channel (server worker num[20]):
+2021-12-29 07:39:37,465 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:39:37,465 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:40:03,658 ==================== TRACER ======================
+2021-12-29 07:40:03,659 Channel (server worker num[20]):
+2021-12-29 07:40:03,660 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:40:03,661 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:40:07,475 ==================== TRACER ======================
+2021-12-29 07:40:07,475 Channel (server worker num[20]):
+2021-12-29 07:40:07,476 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:40:07,477 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:40:33,680 ==================== TRACER ======================
+2021-12-29 07:40:33,681 Channel (server worker num[20]):
+2021-12-29 07:40:33,681 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:40:33,682 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:40:37,507 ==================== TRACER ======================
+2021-12-29 07:40:37,508 Channel (server worker num[20]):
+2021-12-29 07:40:37,509 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:40:37,510 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:41:03,713 ==================== TRACER ======================
+2021-12-29 07:41:03,714 Channel (server worker num[20]):
+2021-12-29 07:41:03,714 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:41:03,715 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:41:07,516 ==================== TRACER ======================
+2021-12-29 07:41:07,517 Channel (server worker num[20]):
+2021-12-29 07:41:07,518 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:41:07,518 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:41:33,744 ==================== TRACER ======================
+2021-12-29 07:41:33,745 Channel (server worker num[20]):
+2021-12-29 07:41:33,746 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:41:33,746 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:41:37,549 ==================== TRACER ======================
+2021-12-29 07:41:37,550 Channel (server worker num[20]):
+2021-12-29 07:41:37,550 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:41:37,551 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:42:11,746 ==================== TRACER ======================
+2021-12-29 07:42:11,748 Channel (server worker num[20]):
+2021-12-29 07:42:11,754 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:42:11,755 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:42:41,785 ==================== TRACER ======================
+2021-12-29 07:42:41,787 DAGExecutor:
+2021-12-29 07:42:41,787 Query count[1]
+2021-12-29 07:42:41,787 QPS[0.03333333333333333 q/s]
+2021-12-29 07:42:41,787 Succ[0.0]
+2021-12-29 07:42:41,787 Error req[0]
+2021-12-29 07:42:41,788 Latency:
+2021-12-29 07:42:41,788 ave[1679.686 ms]
+2021-12-29 07:42:41,788 .50[1679.686 ms]
+2021-12-29 07:42:41,788 .60[1679.686 ms]
+2021-12-29 07:42:41,788 .70[1679.686 ms]
+2021-12-29 07:42:41,788 .80[1679.686 ms]
+2021-12-29 07:42:41,789 .90[1679.686 ms]
+2021-12-29 07:42:41,789 .95[1679.686 ms]
+2021-12-29 07:42:41,789 .99[1679.686 ms]
+2021-12-29 07:42:41,789 Channel (server worker num[20]):
+2021-12-29 07:42:41,790 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:42:41,791 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:43:11,821 ==================== TRACER ======================
+2021-12-29 07:43:11,822 Channel (server worker num[20]):
+2021-12-29 07:43:11,823 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:43:11,823 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:43:41,854 ==================== TRACER ======================
+2021-12-29 07:43:41,855 Channel (server worker num[20]):
+2021-12-29 07:43:41,856 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:43:41,856 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:44:11,858 ==================== TRACER ======================
+2021-12-29 07:44:11,859 Channel (server worker num[20]):
+2021-12-29 07:44:11,859 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:44:11,860 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:44:41,891 ==================== TRACER ======================
+2021-12-29 07:44:41,891 Channel (server worker num[20]):
+2021-12-29 07:44:41,892 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:44:41,893 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:45:11,910 ==================== TRACER ======================
+2021-12-29 07:45:11,911 Channel (server worker num[20]):
+2021-12-29 07:45:11,912 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:45:11,913 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:45:41,943 ==================== TRACER ======================
+2021-12-29 07:45:41,944 Channel (server worker num[20]):
+2021-12-29 07:45:41,945 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:45:41,946 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:46:11,976 ==================== TRACER ======================
+2021-12-29 07:46:11,977 Channel (server worker num[20]):
+2021-12-29 07:46:11,978 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:46:11,979 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:46:41,996 ==================== TRACER ======================
+2021-12-29 07:46:41,997 Channel (server worker num[20]):
+2021-12-29 07:46:41,998 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:46:41,999 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:47:12,015 ==================== TRACER ======================
+2021-12-29 07:47:12,016 Channel (server worker num[20]):
+2021-12-29 07:47:12,016 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:47:12,017 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:47:42,048 ==================== TRACER ======================
+2021-12-29 07:47:42,049 Channel (server worker num[20]):
+2021-12-29 07:47:42,049 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:47:42,050 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:48:12,080 ==================== TRACER ======================
+2021-12-29 07:48:12,081 Channel (server worker num[20]):
+2021-12-29 07:48:12,082 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:48:12,083 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:48:42,113 ==================== TRACER ======================
+2021-12-29 07:48:42,114 Channel (server worker num[20]):
+2021-12-29 07:48:42,115 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:48:42,116 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:49:12,146 ==================== TRACER ======================
+2021-12-29 07:49:12,147 Channel (server worker num[20]):
+2021-12-29 07:49:12,148 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:49:12,149 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:49:42,153 ==================== TRACER ======================
+2021-12-29 07:49:42,154 Channel (server worker num[20]):
+2021-12-29 07:49:42,155 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:49:42,156 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:49:58,546 ==================== TRACER ======================
+2021-12-29 07:49:58,548 Channel (server worker num[20]):
+2021-12-29 07:49:58,550 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:49:58,551 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:50:28,581 ==================== TRACER ======================
+2021-12-29 07:50:28,582 DAGExecutor:
+2021-12-29 07:50:28,582 Query count[1]
+2021-12-29 07:50:28,583 QPS[0.03333333333333333 q/s]
+2021-12-29 07:50:28,583 Succ[0.0]
+2021-12-29 07:50:28,583 Error req[0]
+2021-12-29 07:50:28,583 Latency:
+2021-12-29 07:50:28,583 ave[1878.876 ms]
+2021-12-29 07:50:28,583 .50[1878.876 ms]
+2021-12-29 07:50:28,584 .60[1878.876 ms]
+2021-12-29 07:50:28,584 .70[1878.876 ms]
+2021-12-29 07:50:28,584 .80[1878.876 ms]
+2021-12-29 07:50:28,584 .90[1878.876 ms]
+2021-12-29 07:50:28,584 .95[1878.876 ms]
+2021-12-29 07:50:28,585 .99[1878.876 ms]
+2021-12-29 07:50:28,585 Channel (server worker num[20]):
+2021-12-29 07:50:28,586 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:50:28,586 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:50:58,592 ==================== TRACER ======================
+2021-12-29 07:50:58,593 Channel (server worker num[20]):
+2021-12-29 07:50:58,594 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:50:58,595 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-29 07:51:28,625 ==================== TRACER ======================
+2021-12-29 07:51:28,626 Channel (server worker num[20]):
+2021-12-29 07:51:28,627 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-29 07:51:28,627 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 06:53:16,350 ==================== TRACER ======================
+2021-12-30 06:53:16,352 Channel (server worker num[20]):
+2021-12-30 06:53:16,353 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 06:53:16,354 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 06:53:46,384 ==================== TRACER ======================
+2021-12-30 06:53:46,385 DAGExecutor:
+2021-12-30 06:53:46,386 Query count[1]
+2021-12-30 06:53:46,386 QPS[0.03333333333333333 q/s]
+2021-12-30 06:53:46,386 Succ[0.0]
+2021-12-30 06:53:46,386 Error req[0]
+2021-12-30 06:53:46,386 Latency:
+2021-12-30 06:53:46,387 ave[1711.484 ms]
+2021-12-30 06:53:46,387 .50[1711.484 ms]
+2021-12-30 06:53:46,387 .60[1711.484 ms]
+2021-12-30 06:53:46,387 .70[1711.484 ms]
+2021-12-30 06:53:46,387 .80[1711.484 ms]
+2021-12-30 06:53:46,387 .90[1711.484 ms]
+2021-12-30 06:53:46,388 .95[1711.484 ms]
+2021-12-30 06:53:46,388 .99[1711.484 ms]
+2021-12-30 06:53:46,388 Channel (server worker num[20]):
+2021-12-30 06:53:46,389 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 06:53:46,389 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 07:57:36,951 ==================== TRACER ======================
+2021-12-30 07:57:36,953 DAGExecutor:
+2021-12-30 07:57:36,953 Query count[1]
+2021-12-30 07:57:36,953 QPS[0.03333333333333333 q/s]
+2021-12-30 07:57:36,953 Succ[0.0]
+2021-12-30 07:57:36,953 Error req[0]
+2021-12-30 07:57:36,954 Latency:
+2021-12-30 07:57:36,954 ave[1767.715 ms]
+2021-12-30 07:57:36,954 .50[1767.715 ms]
+2021-12-30 07:57:36,954 .60[1767.715 ms]
+2021-12-30 07:57:36,954 .70[1767.715 ms]
+2021-12-30 07:57:36,954 .80[1767.715 ms]
+2021-12-30 07:57:36,955 .90[1767.715 ms]
+2021-12-30 07:57:36,955 .95[1767.715 ms]
+2021-12-30 07:57:36,955 .99[1767.715 ms]
+2021-12-30 07:57:36,955 Channel (server worker num[20]):
+2021-12-30 07:57:36,956 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 07:57:36,957 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:12:37,278 ==================== TRACER ======================
+2021-12-30 08:12:37,280 DAGExecutor:
+2021-12-30 08:12:37,280 Query count[1]
+2021-12-30 08:12:37,280 QPS[0.03333333333333333 q/s]
+2021-12-30 08:12:37,280 Succ[0.0]
+2021-12-30 08:12:37,281 Error req[0]
+2021-12-30 08:12:37,281 Latency:
+2021-12-30 08:12:37,281 ave[1666.015 ms]
+2021-12-30 08:12:37,281 .50[1666.015 ms]
+2021-12-30 08:12:37,281 .60[1666.015 ms]
+2021-12-30 08:12:37,281 .70[1666.015 ms]
+2021-12-30 08:12:37,282 .80[1666.015 ms]
+2021-12-30 08:12:37,282 .90[1666.015 ms]
+2021-12-30 08:12:37,282 .95[1666.015 ms]
+2021-12-30 08:12:37,282 .99[1666.015 ms]
+2021-12-30 08:12:37,282 Channel (server worker num[20]):
+2021-12-30 08:12:37,283 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:12:37,284 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:14:15,823 ==================== TRACER ======================
+2021-12-30 08:14:15,824 DAGExecutor:
+2021-12-30 08:14:15,825 Query count[1]
+2021-12-30 08:14:15,825 QPS[0.03333333333333333 q/s]
+2021-12-30 08:14:15,825 Succ[0.0]
+2021-12-30 08:14:15,825 Error req[0]
+2021-12-30 08:14:15,825 Latency:
+2021-12-30 08:14:15,826 ave[1659.131 ms]
+2021-12-30 08:14:15,826 .50[1659.131 ms]
+2021-12-30 08:14:15,826 .60[1659.131 ms]
+2021-12-30 08:14:15,826 .70[1659.131 ms]
+2021-12-30 08:14:15,826 .80[1659.131 ms]
+2021-12-30 08:14:15,826 .90[1659.131 ms]
+2021-12-30 08:14:15,827 .95[1659.131 ms]
+2021-12-30 08:14:15,827 .99[1659.131 ms]
+2021-12-30 08:14:15,827 Channel (server worker num[20]):
+2021-12-30 08:14:15,828 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:14:15,828 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:18:01,049 ==================== TRACER ======================
+2021-12-30 08:18:01,050 DAGExecutor:
+2021-12-30 08:18:01,050 Query count[1]
+2021-12-30 08:18:01,050 QPS[0.03333333333333333 q/s]
+2021-12-30 08:18:01,050 Succ[0.0]
+2021-12-30 08:18:01,051 Error req[0]
+2021-12-30 08:18:01,051 Latency:
+2021-12-30 08:18:01,051 ave[1718.021 ms]
+2021-12-30 08:18:01,051 .50[1718.021 ms]
+2021-12-30 08:18:01,051 .60[1718.021 ms]
+2021-12-30 08:18:01,051 .70[1718.021 ms]
+2021-12-30 08:18:01,052 .80[1718.021 ms]
+2021-12-30 08:18:01,052 .90[1718.021 ms]
+2021-12-30 08:18:01,052 .95[1718.021 ms]
+2021-12-30 08:18:01,052 .99[1718.021 ms]
+2021-12-30 08:18:01,052 Channel (server worker num[20]):
+2021-12-30 08:18:01,053 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:18:01,054 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:20:43,337 ==================== TRACER ======================
+2021-12-30 08:20:43,339 DAGExecutor:
+2021-12-30 08:20:43,339 Query count[1]
+2021-12-30 08:20:43,339 QPS[0.03333333333333333 q/s]
+2021-12-30 08:20:43,339 Succ[0.0]
+2021-12-30 08:20:43,339 Error req[0]
+2021-12-30 08:20:43,340 Latency:
+2021-12-30 08:20:43,340 ave[1664.843 ms]
+2021-12-30 08:20:43,340 .50[1664.843 ms]
+2021-12-30 08:20:43,340 .60[1664.843 ms]
+2021-12-30 08:20:43,340 .70[1664.843 ms]
+2021-12-30 08:20:43,341 .80[1664.843 ms]
+2021-12-30 08:20:43,341 .90[1664.843 ms]
+2021-12-30 08:20:43,341 .95[1664.843 ms]
+2021-12-30 08:20:43,341 .99[1664.843 ms]
+2021-12-30 08:20:43,341 Channel (server worker num[20]):
+2021-12-30 08:20:43,342 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:20:43,343 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:32:24,900 ==================== TRACER ======================
+2021-12-30 08:32:24,901 DAGExecutor:
+2021-12-30 08:32:24,902 Query count[1]
+2021-12-30 08:32:24,902 QPS[0.03333333333333333 q/s]
+2021-12-30 08:32:24,902 Succ[0.0]
+2021-12-30 08:32:24,902 Error req[0]
+2021-12-30 08:32:24,902 Latency:
+2021-12-30 08:32:24,903 ave[1698.932 ms]
+2021-12-30 08:32:24,903 .50[1698.932 ms]
+2021-12-30 08:32:24,903 .60[1698.932 ms]
+2021-12-30 08:32:24,903 .70[1698.932 ms]
+2021-12-30 08:32:24,903 .80[1698.932 ms]
+2021-12-30 08:32:24,903 .90[1698.932 ms]
+2021-12-30 08:32:24,904 .95[1698.932 ms]
+2021-12-30 08:32:24,904 .99[1698.932 ms]
+2021-12-30 08:32:24,904 Channel (server worker num[20]):
+2021-12-30 08:32:24,905 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:32:24,906 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:34:51,564 ==================== TRACER ======================
+2021-12-30 08:34:51,565 DAGExecutor:
+2021-12-30 08:34:51,566 Query count[1]
+2021-12-30 08:34:51,566 QPS[0.03333333333333333 q/s]
+2021-12-30 08:34:51,566 Succ[0.0]
+2021-12-30 08:34:51,566 Error req[0]
+2021-12-30 08:34:51,566 Latency:
+2021-12-30 08:34:51,566 ave[1726.27 ms]
+2021-12-30 08:34:51,567 .50[1726.27 ms]
+2021-12-30 08:34:51,567 .60[1726.27 ms]
+2021-12-30 08:34:51,567 .70[1726.27 ms]
+2021-12-30 08:34:51,567 .80[1726.27 ms]
+2021-12-30 08:34:51,567 .90[1726.27 ms]
+2021-12-30 08:34:51,567 .95[1726.27 ms]
+2021-12-30 08:34:51,568 .99[1726.27 ms]
+2021-12-30 08:34:51,568 Channel (server worker num[20]):
+2021-12-30 08:34:51,569 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:34:51,569 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:44:02,417 ==================== TRACER ======================
+2021-12-30 08:44:02,418 DAGExecutor:
+2021-12-30 08:44:02,419 Query count[1]
+2021-12-30 08:44:02,419 QPS[0.03333333333333333 q/s]
+2021-12-30 08:44:02,419 Succ[0.0]
+2021-12-30 08:44:02,419 Error req[0]
+2021-12-30 08:44:02,419 Latency:
+2021-12-30 08:44:02,419 ave[1687.363 ms]
+2021-12-30 08:44:02,420 .50[1687.363 ms]
+2021-12-30 08:44:02,420 .60[1687.363 ms]
+2021-12-30 08:44:02,420 .70[1687.363 ms]
+2021-12-30 08:44:02,420 .80[1687.363 ms]
+2021-12-30 08:44:02,420 .90[1687.363 ms]
+2021-12-30 08:44:02,420 .95[1687.363 ms]
+2021-12-30 08:44:02,421 .99[1687.363 ms]
+2021-12-30 08:44:02,421 Channel (server worker num[20]):
+2021-12-30 08:44:02,422 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:44:02,422 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:44:32,453 ==================== TRACER ======================
+2021-12-30 08:44:32,453 Channel (server worker num[20]):
+2021-12-30 08:44:32,454 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:44:32,455 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:45:02,485 ==================== TRACER ======================
+2021-12-30 08:45:02,486 Channel (server worker num[20]):
+2021-12-30 08:45:02,487 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:45:02,488 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:45:32,518 ==================== TRACER ======================
+2021-12-30 08:45:32,519 Channel (server worker num[20]):
+2021-12-30 08:45:32,520 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:45:32,521 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:46:02,551 ==================== TRACER ======================
+2021-12-30 08:46:02,552 Channel (server worker num[20]):
+2021-12-30 08:46:02,553 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:46:02,553 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:46:32,584 ==================== TRACER ======================
+2021-12-30 08:46:32,585 Channel (server worker num[20]):
+2021-12-30 08:46:32,585 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:46:32,586 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:47:02,616 ==================== TRACER ======================
+2021-12-30 08:47:02,617 Channel (server worker num[20]):
+2021-12-30 08:47:02,618 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:47:02,619 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:47:32,649 ==================== TRACER ======================
+2021-12-30 08:47:32,650 Channel (server worker num[20]):
+2021-12-30 08:47:32,651 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:47:32,652 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:48:02,682 ==================== TRACER ======================
+2021-12-30 08:48:02,683 Channel (server worker num[20]):
+2021-12-30 08:48:02,684 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:48:02,684 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:48:32,715 ==================== TRACER ======================
+2021-12-30 08:48:32,716 Channel (server worker num[20]):
+2021-12-30 08:48:32,717 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:48:32,717 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:49:02,748 ==================== TRACER ======================
+2021-12-30 08:49:02,749 Channel (server worker num[20]):
+2021-12-30 08:49:02,749 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:49:02,750 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:49:32,396 ==================== TRACER ======================
+2021-12-30 08:49:32,398 Channel (server worker num[20]):
+2021-12-30 08:49:32,400 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:49:32,401 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:50:02,431 ==================== TRACER ======================
+2021-12-30 08:50:02,432 DAGExecutor:
+2021-12-30 08:50:02,433 Query count[1]
+2021-12-30 08:50:02,433 QPS[0.03333333333333333 q/s]
+2021-12-30 08:50:02,433 Succ[0.0]
+2021-12-30 08:50:02,433 Error req[0]
+2021-12-30 08:50:02,433 Latency:
+2021-12-30 08:50:02,433 ave[1679.848 ms]
+2021-12-30 08:50:02,434 .50[1679.848 ms]
+2021-12-30 08:50:02,434 .60[1679.848 ms]
+2021-12-30 08:50:02,434 .70[1679.848 ms]
+2021-12-30 08:50:02,434 .80[1679.848 ms]
+2021-12-30 08:50:02,434 .90[1679.848 ms]
+2021-12-30 08:50:02,434 .95[1679.848 ms]
+2021-12-30 08:50:02,435 .99[1679.848 ms]
+2021-12-30 08:50:02,435 Channel (server worker num[20]):
+2021-12-30 08:50:02,436 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:50:02,436 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:50:32,467 ==================== TRACER ======================
+2021-12-30 08:50:32,468 Channel (server worker num[20]):
+2021-12-30 08:50:32,468 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:50:32,469 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:51:02,497 ==================== TRACER ======================
+2021-12-30 08:51:02,498 Channel (server worker num[20]):
+2021-12-30 08:51:02,499 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:51:02,500 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:51:32,530 ==================== TRACER ======================
+2021-12-30 08:51:32,531 Channel (server worker num[20]):
+2021-12-30 08:51:32,532 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:51:32,532 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:52:02,563 ==================== TRACER ======================
+2021-12-30 08:52:02,564 Channel (server worker num[20]):
+2021-12-30 08:52:02,565 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:52:02,565 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:52:32,596 ==================== TRACER ======================
+2021-12-30 08:52:32,596 Channel (server worker num[20]):
+2021-12-30 08:52:32,597 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:52:32,598 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:52:42,481 ==================== TRACER ======================
+2021-12-30 08:52:42,484 Channel (server worker num[20]):
+2021-12-30 08:52:42,486 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:52:42,486 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:53:12,517 ==================== TRACER ======================
+2021-12-30 08:53:12,518 DAGExecutor:
+2021-12-30 08:53:12,518 Query count[1]
+2021-12-30 08:53:12,518 QPS[0.03333333333333333 q/s]
+2021-12-30 08:53:12,519 Succ[0.0]
+2021-12-30 08:53:12,519 Error req[0]
+2021-12-30 08:53:12,519 Latency:
+2021-12-30 08:53:12,519 ave[1709.757 ms]
+2021-12-30 08:53:12,519 .50[1709.757 ms]
+2021-12-30 08:53:12,519 .60[1709.757 ms]
+2021-12-30 08:53:12,520 .70[1709.757 ms]
+2021-12-30 08:53:12,520 .80[1709.757 ms]
+2021-12-30 08:53:12,520 .90[1709.757 ms]
+2021-12-30 08:53:12,520 .95[1709.757 ms]
+2021-12-30 08:53:12,520 .99[1709.757 ms]
+2021-12-30 08:53:12,520 Channel (server worker num[20]):
+2021-12-30 08:53:12,521 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:53:12,522 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:53:42,552 ==================== TRACER ======================
+2021-12-30 08:53:42,553 Channel (server worker num[20]):
+2021-12-30 08:53:42,554 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:53:42,555 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:54:12,585 ==================== TRACER ======================
+2021-12-30 08:54:12,586 Channel (server worker num[20]):
+2021-12-30 08:54:12,587 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:54:12,587 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:54:42,618 ==================== TRACER ======================
+2021-12-30 08:54:42,619 Channel (server worker num[20]):
+2021-12-30 08:54:42,620 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:54:42,620 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 08:59:43,531 ==================== TRACER ======================
+2021-12-30 08:59:43,533 Channel (server worker num[20]):
+2021-12-30 08:59:43,535 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 08:59:43,536 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:00:13,566 ==================== TRACER ======================
+2021-12-30 09:00:13,567 Channel (server worker num[20]):
+2021-12-30 09:00:13,568 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:00:13,569 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:00:43,599 ==================== TRACER ======================
+2021-12-30 09:00:43,600 Channel (server worker num[20]):
+2021-12-30 09:00:43,601 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:00:43,602 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:01:13,632 ==================== TRACER ======================
+2021-12-30 09:01:13,633 Channel (server worker num[20]):
+2021-12-30 09:01:13,634 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:01:13,634 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:01:43,665 ==================== TRACER ======================
+2021-12-30 09:01:43,666 Channel (server worker num[20]):
+2021-12-30 09:01:43,666 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:01:43,667 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:02:13,697 ==================== TRACER ======================
+2021-12-30 09:02:13,698 Channel (server worker num[20]):
+2021-12-30 09:02:13,699 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:02:13,700 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:02:43,708 ==================== TRACER ======================
+2021-12-30 09:02:43,709 Channel (server worker num[20]):
+2021-12-30 09:02:43,710 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:02:43,711 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:03:10,066 ==================== TRACER ======================
+2021-12-30 09:03:10,068 Channel (server worker num[20]):
+2021-12-30 09:03:10,071 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:03:10,071 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:03:12,019 ==================== TRACER ======================
+2021-12-30 09:03:12,021 Channel (server worker num[20]):
+2021-12-30 09:03:12,023 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:03:12,024 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:03:42,055 ==================== TRACER ======================
+2021-12-30 09:03:42,056 DAGExecutor:
+2021-12-30 09:03:42,056 Query count[1]
+2021-12-30 09:03:42,056 QPS[0.03333333333333333 q/s]
+2021-12-30 09:03:42,057 Succ[0.0]
+2021-12-30 09:03:42,057 Error req[0]
+2021-12-30 09:03:42,057 Latency:
+2021-12-30 09:03:42,057 ave[1915.476 ms]
+2021-12-30 09:03:42,057 .50[1915.476 ms]
+2021-12-30 09:03:42,057 .60[1915.476 ms]
+2021-12-30 09:03:42,058 .70[1915.476 ms]
+2021-12-30 09:03:42,058 .80[1915.476 ms]
+2021-12-30 09:03:42,058 .90[1915.476 ms]
+2021-12-30 09:03:42,058 .95[1915.476 ms]
+2021-12-30 09:03:42,058 .99[1915.476 ms]
+2021-12-30 09:03:42,058 Channel (server worker num[20]):
+2021-12-30 09:03:42,059 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:03:42,060 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:04:12,090 ==================== TRACER ======================
+2021-12-30 09:04:12,091 Channel (server worker num[20]):
+2021-12-30 09:04:12,092 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:04:12,093 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:04:42,120 ==================== TRACER ======================
+2021-12-30 09:04:42,121 Channel (server worker num[20]):
+2021-12-30 09:04:42,122 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:04:42,123 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:05:12,153 ==================== TRACER ======================
+2021-12-30 09:05:12,154 Channel (server worker num[20]):
+2021-12-30 09:05:12,155 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:05:12,155 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:05:42,186 ==================== TRACER ======================
+2021-12-30 09:05:42,187 Channel (server worker num[20]):
+2021-12-30 09:05:42,188 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:05:42,188 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:06:12,219 ==================== TRACER ======================
+2021-12-30 09:06:12,220 Channel (server worker num[20]):
+2021-12-30 09:06:12,220 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:06:12,221 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:06:42,251 ==================== TRACER ======================
+2021-12-30 09:06:42,252 Channel (server worker num[20]):
+2021-12-30 09:06:42,253 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:06:42,254 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:07:12,284 ==================== TRACER ======================
+2021-12-30 09:07:12,285 Channel (server worker num[20]):
+2021-12-30 09:07:12,286 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:07:12,287 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:07:27,496 ==================== TRACER ======================
+2021-12-30 09:07:27,498 Channel (server worker num[20]):
+2021-12-30 09:07:27,500 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:07:27,501 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:07:48,868 ==================== TRACER ======================
+2021-12-30 09:07:48,871 Channel (server worker num[20]):
+2021-12-30 09:07:48,872 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:07:48,873 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:08:06,615 ==================== TRACER ======================
+2021-12-30 09:08:06,618 Channel (server worker num[20]):
+2021-12-30 09:08:06,620 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:08:06,620 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:08:36,651 ==================== TRACER ======================
+2021-12-30 09:08:36,652 DAGExecutor:
+2021-12-30 09:08:36,652 Query count[1]
+2021-12-30 09:08:36,652 QPS[0.03333333333333333 q/s]
+2021-12-30 09:08:36,653 Succ[0.0]
+2021-12-30 09:08:36,653 Error req[0]
+2021-12-30 09:08:36,653 Latency:
+2021-12-30 09:08:36,653 ave[1661.09 ms]
+2021-12-30 09:08:36,653 .50[1661.09 ms]
+2021-12-30 09:08:36,654 .60[1661.09 ms]
+2021-12-30 09:08:36,654 .70[1661.09 ms]
+2021-12-30 09:08:36,654 .80[1661.09 ms]
+2021-12-30 09:08:36,654 .90[1661.09 ms]
+2021-12-30 09:08:36,654 .95[1661.09 ms]
+2021-12-30 09:08:36,654 .99[1661.09 ms]
+2021-12-30 09:08:36,655 Channel (server worker num[20]):
+2021-12-30 09:08:36,655 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:08:36,656 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:09:06,686 ==================== TRACER ======================
+2021-12-30 09:09:06,687 Channel (server worker num[20]):
+2021-12-30 09:09:06,688 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:09:06,689 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:09:36,719 ==================== TRACER ======================
+2021-12-30 09:09:36,720 Channel (server worker num[20]):
+2021-12-30 09:09:36,721 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:09:36,722 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:10:06,752 ==================== TRACER ======================
+2021-12-30 09:10:06,753 Channel (server worker num[20]):
+2021-12-30 09:10:06,754 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:10:06,755 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:10:36,785 ==================== TRACER ======================
+2021-12-30 09:10:36,786 Channel (server worker num[20]):
+2021-12-30 09:10:36,787 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:10:36,787 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:11:06,818 ==================== TRACER ======================
+2021-12-30 09:11:06,819 Channel (server worker num[20]):
+2021-12-30 09:11:06,819 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:11:06,820 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:11:36,851 ==================== TRACER ======================
+2021-12-30 09:11:36,851 Channel (server worker num[20]):
+2021-12-30 09:11:36,852 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:11:36,853 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:12:06,883 ==================== TRACER ======================
+2021-12-30 09:12:06,884 Channel (server worker num[20]):
+2021-12-30 09:12:06,885 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:12:06,886 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:12:36,900 ==================== TRACER ======================
+2021-12-30 09:12:36,901 Channel (server worker num[20]):
+2021-12-30 09:12:36,902 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:12:36,903 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:13:06,933 ==================== TRACER ======================
+2021-12-30 09:13:06,934 Channel (server worker num[20]):
+2021-12-30 09:13:06,935 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:13:06,935 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:13:36,966 ==================== TRACER ======================
+2021-12-30 09:13:36,967 Channel (server worker num[20]):
+2021-12-30 09:13:36,967 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:13:36,968 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:14:06,998 ==================== TRACER ======================
+2021-12-30 09:14:06,999 Channel (server worker num[20]):
+2021-12-30 09:14:07,000 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:14:07,001 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:14:37,031 ==================== TRACER ======================
+2021-12-30 09:14:37,032 Channel (server worker num[20]):
+2021-12-30 09:14:37,033 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:14:37,034 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:15:07,064 ==================== TRACER ======================
+2021-12-30 09:15:07,065 Channel (server worker num[20]):
+2021-12-30 09:15:07,068 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:15:07,068 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:15:37,096 ==================== TRACER ======================
+2021-12-30 09:15:37,097 Channel (server worker num[20]):
+2021-12-30 09:15:37,098 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:15:37,099 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:16:07,129 ==================== TRACER ======================
+2021-12-30 09:16:07,130 Channel (server worker num[20]):
+2021-12-30 09:16:07,131 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:16:07,131 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:16:37,162 ==================== TRACER ======================
+2021-12-30 09:16:37,163 Channel (server worker num[20]):
+2021-12-30 09:16:37,163 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:16:37,164 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:17:07,195 ==================== TRACER ======================
+2021-12-30 09:17:07,195 Channel (server worker num[20]):
+2021-12-30 09:17:07,196 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:17:07,197 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:17:37,227 ==================== TRACER ======================
+2021-12-30 09:17:37,228 Channel (server worker num[20]):
+2021-12-30 09:17:37,229 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:17:37,230 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:18:07,260 ==================== TRACER ======================
+2021-12-30 09:18:07,261 Channel (server worker num[20]):
+2021-12-30 09:18:07,262 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:18:07,263 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:18:37,293 ==================== TRACER ======================
+2021-12-30 09:18:37,294 Channel (server worker num[20]):
+2021-12-30 09:18:37,295 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:18:37,296 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:19:07,326 ==================== TRACER ======================
+2021-12-30 09:19:07,327 Channel (server worker num[20]):
+2021-12-30 09:19:07,328 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:19:07,328 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:19:39,524 ==================== TRACER ======================
+2021-12-30 09:19:39,525 Channel (server worker num[20]):
+2021-12-30 09:19:39,527 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:19:39,528 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:20:11,284 ==================== TRACER ======================
+2021-12-30 09:20:11,286 Channel (server worker num[20]):
+2021-12-30 09:20:11,289 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:20:11,289 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:20:41,320 ==================== TRACER ======================
+2021-12-30 09:20:41,321 DAGExecutor:
+2021-12-30 09:20:41,321 Query count[1]
+2021-12-30 09:20:41,321 QPS[0.03333333333333333 q/s]
+2021-12-30 09:20:41,322 Succ[0.0]
+2021-12-30 09:20:41,322 Error req[0]
+2021-12-30 09:20:41,322 Latency:
+2021-12-30 09:20:41,322 ave[2327.767 ms]
+2021-12-30 09:20:41,322 .50[2327.767 ms]
+2021-12-30 09:20:41,323 .60[2327.767 ms]
+2021-12-30 09:20:41,323 .70[2327.767 ms]
+2021-12-30 09:20:41,323 .80[2327.767 ms]
+2021-12-30 09:20:41,323 .90[2327.767 ms]
+2021-12-30 09:20:41,323 .95[2327.767 ms]
+2021-12-30 09:20:41,323 .99[2327.767 ms]
+2021-12-30 09:20:41,324 Channel (server worker num[20]):
+2021-12-30 09:20:41,324 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:20:41,325 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:21:11,356 ==================== TRACER ======================
+2021-12-30 09:21:11,356 Channel (server worker num[20]):
+2021-12-30 09:21:11,357 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:21:11,358 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:21:19,661 ==================== TRACER ======================
+2021-12-30 09:21:19,663 Channel (server worker num[20]):
+2021-12-30 09:21:19,665 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:21:19,666 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:21:49,668 ==================== TRACER ======================
+2021-12-30 09:21:49,669 DAGExecutor:
+2021-12-30 09:21:49,669 Query count[1]
+2021-12-30 09:21:49,670 QPS[0.03333333333333333 q/s]
+2021-12-30 09:21:49,670 Succ[0.0]
+2021-12-30 09:21:49,670 Error req[0]
+2021-12-30 09:21:49,670 Latency:
+2021-12-30 09:21:49,670 ave[1667.456 ms]
+2021-12-30 09:21:49,670 .50[1667.456 ms]
+2021-12-30 09:21:49,671 .60[1667.456 ms]
+2021-12-30 09:21:49,671 .70[1667.456 ms]
+2021-12-30 09:21:49,671 .80[1667.456 ms]
+2021-12-30 09:21:49,671 .90[1667.456 ms]
+2021-12-30 09:21:49,671 .95[1667.456 ms]
+2021-12-30 09:21:49,671 .99[1667.456 ms]
+2021-12-30 09:21:49,672 Channel (server worker num[20]):
+2021-12-30 09:21:49,672 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:21:49,673 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:22:19,704 ==================== TRACER ======================
+2021-12-30 09:22:19,704 Channel (server worker num[20]):
+2021-12-30 09:22:19,705 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:22:19,706 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:22:49,736 ==================== TRACER ======================
+2021-12-30 09:22:49,737 Channel (server worker num[20]):
+2021-12-30 09:22:49,738 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:22:49,739 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:23:19,769 ==================== TRACER ======================
+2021-12-30 09:23:19,770 Channel (server worker num[20]):
+2021-12-30 09:23:19,771 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:23:19,772 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:23:31,946 ==================== TRACER ======================
+2021-12-30 09:23:31,948 Channel (server worker num[20]):
+2021-12-30 09:23:31,951 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:23:31,951 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:24:02,960 ==================== TRACER ======================
+2021-12-30 09:24:02,962 Channel (server worker num[20]):
+2021-12-30 09:24:02,964 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:24:02,965 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:24:32,995 ==================== TRACER ======================
+2021-12-30 09:24:32,996 DAGExecutor:
+2021-12-30 09:24:32,997 Query count[1]
+2021-12-30 09:24:32,997 QPS[0.03333333333333333 q/s]
+2021-12-30 09:24:32,997 Succ[0.0]
+2021-12-30 09:24:32,997 Error req[0]
+2021-12-30 09:24:32,997 Latency:
+2021-12-30 09:24:32,997 ave[2969.908 ms]
+2021-12-30 09:24:32,998 .50[2969.908 ms]
+2021-12-30 09:24:32,998 .60[2969.908 ms]
+2021-12-30 09:24:32,998 .70[2969.908 ms]
+2021-12-30 09:24:32,998 .80[2969.908 ms]
+2021-12-30 09:24:32,998 .90[2969.908 ms]
+2021-12-30 09:24:32,998 .95[2969.908 ms]
+2021-12-30 09:24:32,999 .99[2969.908 ms]
+2021-12-30 09:24:32,999 Channel (server worker num[20]):
+2021-12-30 09:24:33,000 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:24:33,000 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:25:03,031 ==================== TRACER ======================
+2021-12-30 09:25:03,032 Channel (server worker num[20]):
+2021-12-30 09:25:03,032 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:25:03,033 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:25:33,049 ==================== TRACER ======================
+2021-12-30 09:25:33,050 Channel (server worker num[20]):
+2021-12-30 09:25:33,051 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:25:33,052 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:25:54,144 ==================== TRACER ======================
+2021-12-30 09:25:54,146 Channel (server worker num[20]):
+2021-12-30 09:25:54,149 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:25:54,149 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:26:24,168 ==================== TRACER ======================
+2021-12-30 09:26:24,170 Op(ppyolo_mbv3):
+2021-12-30 09:26:24,170 in[11.296 ms]
+2021-12-30 09:26:24,170 prep[49.388 ms]
+2021-12-30 09:26:24,170 midp[1611.812 ms]
+2021-12-30 09:26:24,170 postp[11.047 ms]
+2021-12-30 09:26:24,171 out[2.017 ms]
+2021-12-30 09:26:24,171 idle[0.007898265264956517]
+2021-12-30 09:26:24,171 DAGExecutor:
+2021-12-30 09:26:24,171 Query count[1]
+2021-12-30 09:26:24,171 QPS[0.03333333333333333 q/s]
+2021-12-30 09:26:24,171 Succ[1.0]
+2021-12-30 09:26:24,172 Error req[]
+2021-12-30 09:26:24,172 Latency:
+2021-12-30 09:26:24,172 ave[1682.576 ms]
+2021-12-30 09:26:24,172 .50[1682.576 ms]
+2021-12-30 09:26:24,172 .60[1682.576 ms]
+2021-12-30 09:26:24,173 .70[1682.576 ms]
+2021-12-30 09:26:24,173 .80[1682.576 ms]
+2021-12-30 09:26:24,173 .90[1682.576 ms]
+2021-12-30 09:26:24,173 .95[1682.576 ms]
+2021-12-30 09:26:24,173 .99[1682.576 ms]
+2021-12-30 09:26:24,173 Channel (server worker num[20]):
+2021-12-30 09:26:24,174 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:26:24,175 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:26:54,205 ==================== TRACER ======================
+2021-12-30 09:26:54,206 Channel (server worker num[20]):
+2021-12-30 09:26:54,207 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:26:54,208 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:39:07,251 ==================== TRACER ======================
+2021-12-30 09:39:07,254 Channel (server worker num[20]):
+2021-12-30 09:39:07,256 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:39:07,256 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:39:37,287 ==================== TRACER ======================
+2021-12-30 09:39:37,288 Op(ppyolo_mbv3):
+2021-12-30 09:39:37,289 in[16052.636 ms]
+2021-12-30 09:39:37,289 prep[52.574 ms]
+2021-12-30 09:39:37,289 midp[1722.923 ms]
+2021-12-30 09:39:37,289 postp[11.854 ms]
+2021-12-30 09:39:37,289 out[1.654 ms]
+2021-12-30 09:39:37,289 idle[0.8998213785379944]
+2021-12-30 09:39:37,290 DAGExecutor:
+2021-12-30 09:39:37,290 Query count[1]
+2021-12-30 09:39:37,290 QPS[0.03333333333333333 q/s]
+2021-12-30 09:39:37,290 Succ[1.0]
+2021-12-30 09:39:37,290 Error req[]
+2021-12-30 09:39:37,291 Latency:
+2021-12-30 09:39:37,291 ave[1797.892 ms]
+2021-12-30 09:39:37,291 .50[1797.892 ms]
+2021-12-30 09:39:37,291 .60[1797.892 ms]
+2021-12-30 09:39:37,291 .70[1797.892 ms]
+2021-12-30 09:39:37,291 .80[1797.892 ms]
+2021-12-30 09:39:37,292 .90[1797.892 ms]
+2021-12-30 09:39:37,292 .95[1797.892 ms]
+2021-12-30 09:39:37,292 .99[1797.892 ms]
+2021-12-30 09:39:37,292 Channel (server worker num[20]):
+2021-12-30 09:39:37,293 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:39:37,294 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:40:07,324 ==================== TRACER ======================
+2021-12-30 09:40:07,325 DAGExecutor:
+2021-12-30 09:40:07,325 Query count[1]
+2021-12-30 09:40:07,325 QPS[0.03333333333333333 q/s]
+2021-12-30 09:40:07,326 Succ[0.0]
+2021-12-30 09:40:07,326 Error req[1]
+2021-12-30 09:40:07,326 Latency:
+2021-12-30 09:40:07,326 ave[118.163 ms]
+2021-12-30 09:40:07,326 .50[118.163 ms]
+2021-12-30 09:40:07,326 .60[118.163 ms]
+2021-12-30 09:40:07,327 .70[118.163 ms]
+2021-12-30 09:40:07,327 .80[118.163 ms]
+2021-12-30 09:40:07,327 .90[118.163 ms]
+2021-12-30 09:40:07,327 .95[118.163 ms]
+2021-12-30 09:40:07,327 .99[118.163 ms]
+2021-12-30 09:40:07,327 Channel (server worker num[20]):
+2021-12-30 09:40:07,328 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:40:07,329 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:40:37,359 ==================== TRACER ======================
+2021-12-30 09:40:37,360 Channel (server worker num[20]):
+2021-12-30 09:40:37,361 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:40:37,362 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:41:07,392 ==================== TRACER ======================
+2021-12-30 09:41:07,393 DAGExecutor:
+2021-12-30 09:41:07,394 Query count[2]
+2021-12-30 09:41:07,394 QPS[0.06666666666666667 q/s]
+2021-12-30 09:41:07,394 Succ[0.0]
+2021-12-30 09:41:07,394 Error req[2, 3]
+2021-12-30 09:41:07,394 Latency:
+2021-12-30 09:41:07,395 ave[91.7105 ms]
+2021-12-30 09:41:07,395 .50[110.376 ms]
+2021-12-30 09:41:07,395 .60[110.376 ms]
+2021-12-30 09:41:07,395 .70[110.376 ms]
+2021-12-30 09:41:07,395 .80[110.376 ms]
+2021-12-30 09:41:07,395 .90[110.376 ms]
+2021-12-30 09:41:07,396 .95[110.376 ms]
+2021-12-30 09:41:07,396 .99[110.376 ms]
+2021-12-30 09:41:07,396 Channel (server worker num[20]):
+2021-12-30 09:41:07,397 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:41:07,397 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:41:37,428 ==================== TRACER ======================
+2021-12-30 09:41:37,429 Channel (server worker num[20]):
+2021-12-30 09:41:37,430 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:41:37,430 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:42:01,898 ==================== TRACER ======================
+2021-12-30 09:42:01,900 Channel (server worker num[20]):
+2021-12-30 09:42:01,902 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:42:01,902 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:42:24,079 ==================== TRACER ======================
+2021-12-30 09:42:24,081 Channel (server worker num[20]):
+2021-12-30 09:42:24,083 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:42:24,084 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:42:54,114 ==================== TRACER ======================
+2021-12-30 09:42:54,116 Op(ppyolo_mbv3):
+2021-12-30 09:42:54,116 in[238.366 ms]
+2021-12-30 09:42:54,116 prep[78.16 ms]
+2021-12-30 09:42:54,116 midp[1605.08 ms]
+2021-12-30 09:42:54,117 postp[10.166 ms]
+2021-12-30 09:42:54,117 out[1.317 ms]
+2021-12-30 09:42:54,117 idle[0.12398963524183315]
+2021-12-30 09:42:54,117 DAGExecutor:
+2021-12-30 09:42:54,117 Query count[1]
+2021-12-30 09:42:54,117 QPS[0.03333333333333333 q/s]
+2021-12-30 09:42:54,118 Succ[1.0]
+2021-12-30 09:42:54,118 Error req[]
+2021-12-30 09:42:54,118 Latency:
+2021-12-30 09:42:54,118 ave[1705.072 ms]
+2021-12-30 09:42:54,118 .50[1705.072 ms]
+2021-12-30 09:42:54,119 .60[1705.072 ms]
+2021-12-30 09:42:54,119 .70[1705.072 ms]
+2021-12-30 09:42:54,119 .80[1705.072 ms]
+2021-12-30 09:42:54,119 .90[1705.072 ms]
+2021-12-30 09:42:54,119 .95[1705.072 ms]
+2021-12-30 09:42:54,119 .99[1705.072 ms]
+2021-12-30 09:42:54,119 Channel (server worker num[20]):
+2021-12-30 09:42:54,120 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:42:54,121 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:43:24,151 ==================== TRACER ======================
+2021-12-30 09:43:24,153 DAGExecutor:
+2021-12-30 09:43:24,153 Query count[2]
+2021-12-30 09:43:24,153 QPS[0.06666666666666667 q/s]
+2021-12-30 09:43:24,153 Succ[0.0]
+2021-12-30 09:43:24,153 Error req[1, 2]
+2021-12-30 09:43:24,154 Latency:
+2021-12-30 09:43:24,154 ave[111.03649999999999 ms]
+2021-12-30 09:43:24,154 .50[111.139 ms]
+2021-12-30 09:43:24,154 .60[111.139 ms]
+2021-12-30 09:43:24,154 .70[111.139 ms]
+2021-12-30 09:43:24,154 .80[111.139 ms]
+2021-12-30 09:43:24,155 .90[111.139 ms]
+2021-12-30 09:43:24,155 .95[111.139 ms]
+2021-12-30 09:43:24,155 .99[111.139 ms]
+2021-12-30 09:43:24,155 Channel (server worker num[20]):
+2021-12-30 09:43:24,156 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:43:24,156 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:43:54,187 ==================== TRACER ======================
+2021-12-30 09:43:54,188 Channel (server worker num[20]):
+2021-12-30 09:43:54,188 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:43:54,189 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:44:24,220 ==================== TRACER ======================
+2021-12-30 09:44:24,220 Channel (server worker num[20]):
+2021-12-30 09:44:24,221 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:44:24,222 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:44:54,933 ==================== TRACER ======================
+2021-12-30 09:44:54,935 Channel (server worker num[20]):
+2021-12-30 09:44:54,937 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:44:54,938 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:45:24,968 ==================== TRACER ======================
+2021-12-30 09:45:24,970 Op(ppyolo_mbv3):
+2021-12-30 09:45:24,970 in[1609.535 ms]
+2021-12-30 09:45:24,970 prep[67.209 ms]
+2021-12-30 09:45:24,970 midp[1766.049 ms]
+2021-12-30 09:45:24,970 postp[11.493 ms]
+2021-12-30 09:45:24,971 out[1.567 ms]
+2021-12-30 09:45:24,971 idle[0.4661951767045647]
+2021-12-30 09:45:24,971 DAGExecutor:
+2021-12-30 09:45:24,971 Query count[2]
+2021-12-30 09:45:24,971 QPS[0.06666666666666667 q/s]
+2021-12-30 09:45:24,972 Succ[0.5]
+2021-12-30 09:45:24,972 Error req[1]
+2021-12-30 09:45:24,972 Latency:
+2021-12-30 09:45:24,972 ave[984.508 ms]
+2021-12-30 09:45:24,972 .50[1859.535 ms]
+2021-12-30 09:45:24,972 .60[1859.535 ms]
+2021-12-30 09:45:24,973 .70[1859.535 ms]
+2021-12-30 09:45:24,973 .80[1859.535 ms]
+2021-12-30 09:45:24,973 .90[1859.535 ms]
+2021-12-30 09:45:24,973 .95[1859.535 ms]
+2021-12-30 09:45:24,973 .99[1859.535 ms]
+2021-12-30 09:45:24,973 Channel (server worker num[20]):
+2021-12-30 09:45:24,974 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:45:24,975 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:45:55,005 ==================== TRACER ======================
+2021-12-30 09:45:55,006 Channel (server worker num[20]):
+2021-12-30 09:45:55,007 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:45:55,007 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:46:17,802 ==================== TRACER ======================
+2021-12-30 09:46:17,804 Channel (server worker num[20]):
+2021-12-30 09:46:17,807 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:46:17,808 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:46:47,838 ==================== TRACER ======================
+2021-12-30 09:46:47,840 Op(ppyolo_mbv3):
+2021-12-30 09:46:47,840 in[842.57 ms]
+2021-12-30 09:46:47,840 prep[79.406 ms]
+2021-12-30 09:46:47,841 midp[1591.784 ms]
+2021-12-30 09:46:47,841 postp[10.156 ms]
+2021-12-30 09:46:47,841 out[1.137 ms]
+2021-12-30 09:46:47,841 idle[0.33413437262504997]
+2021-12-30 09:46:47,841 DAGExecutor:
+2021-12-30 09:46:47,841 Query count[2]
+2021-12-30 09:46:47,842 QPS[0.06666666666666667 q/s]
+2021-12-30 09:46:47,842 Succ[0.5]
+2021-12-30 09:46:47,842 Error req[1]
+2021-12-30 09:46:47,842 Latency:
+2021-12-30 09:46:47,842 ave[910.8975 ms]
+2021-12-30 09:46:47,842 .50[1695.613 ms]
+2021-12-30 09:46:47,843 .60[1695.613 ms]
+2021-12-30 09:46:47,843 .70[1695.613 ms]
+2021-12-30 09:46:47,843 .80[1695.613 ms]
+2021-12-30 09:46:47,843 .90[1695.613 ms]
+2021-12-30 09:46:47,843 .95[1695.613 ms]
+2021-12-30 09:46:47,843 .99[1695.613 ms]
+2021-12-30 09:46:47,844 Channel (server worker num[20]):
+2021-12-30 09:46:47,844 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:46:47,845 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:47:17,876 ==================== TRACER ======================
+2021-12-30 09:47:17,877 Channel (server worker num[20]):
+2021-12-30 09:47:17,877 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:47:17,878 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:47:47,909 ==================== TRACER ======================
+2021-12-30 09:47:47,910 Channel (server worker num[20]):
+2021-12-30 09:47:47,910 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:47:47,911 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:48:17,942 ==================== TRACER ======================
+2021-12-30 09:48:17,943 Channel (server worker num[20]):
+2021-12-30 09:48:17,944 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:48:17,944 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:48:47,975 ==================== TRACER ======================
+2021-12-30 09:48:47,976 Channel (server worker num[20]):
+2021-12-30 09:48:47,977 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:48:47,977 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:49:18,008 ==================== TRACER ======================
+2021-12-30 09:49:18,009 Channel (server worker num[20]):
+2021-12-30 09:49:18,011 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:49:18,012 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:49:48,042 ==================== TRACER ======================
+2021-12-30 09:49:48,043 Channel (server worker num[20]):
+2021-12-30 09:49:48,044 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:49:48,045 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:50:18,057 ==================== TRACER ======================
+2021-12-30 09:50:18,058 Channel (server worker num[20]):
+2021-12-30 09:50:18,059 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:50:18,060 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:50:48,091 ==================== TRACER ======================
+2021-12-30 09:50:48,092 Channel (server worker num[20]):
+2021-12-30 09:50:48,093 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:50:48,093 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:51:18,124 ==================== TRACER ======================
+2021-12-30 09:51:18,125 Channel (server worker num[20]):
+2021-12-30 09:51:18,126 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:51:18,127 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:51:48,157 ==================== TRACER ======================
+2021-12-30 09:51:48,158 Channel (server worker num[20]):
+2021-12-30 09:51:48,159 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:51:48,159 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:52:18,190 ==================== TRACER ======================
+2021-12-30 09:52:18,191 Channel (server worker num[20]):
+2021-12-30 09:52:18,192 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:52:18,193 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:52:48,220 ==================== TRACER ======================
+2021-12-30 09:52:48,221 Channel (server worker num[20]):
+2021-12-30 09:52:48,222 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:52:48,223 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:53:18,253 ==================== TRACER ======================
+2021-12-30 09:53:18,254 Channel (server worker num[20]):
+2021-12-30 09:53:18,255 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:53:18,256 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:53:48,286 ==================== TRACER ======================
+2021-12-30 09:53:48,287 Channel (server worker num[20]):
+2021-12-30 09:53:48,288 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:53:48,289 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:54:18,318 ==================== TRACER ======================
+2021-12-30 09:54:18,319 Channel (server worker num[20]):
+2021-12-30 09:54:18,319 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:54:18,320 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:54:48,351 ==================== TRACER ======================
+2021-12-30 09:54:48,352 Channel (server worker num[20]):
+2021-12-30 09:54:48,352 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:54:48,353 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:55:18,384 ==================== TRACER ======================
+2021-12-30 09:55:18,385 Channel (server worker num[20]):
+2021-12-30 09:55:18,385 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:55:18,386 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:55:48,417 ==================== TRACER ======================
+2021-12-30 09:55:48,418 Channel (server worker num[20]):
+2021-12-30 09:55:48,418 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:55:48,419 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:56:18,450 ==================== TRACER ======================
+2021-12-30 09:56:18,451 Channel (server worker num[20]):
+2021-12-30 09:56:18,451 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:56:18,452 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:56:48,483 ==================== TRACER ======================
+2021-12-30 09:56:48,484 Channel (server worker num[20]):
+2021-12-30 09:56:48,484 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:56:48,485 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:57:18,516 ==================== TRACER ======================
+2021-12-30 09:57:18,517 Channel (server worker num[20]):
+2021-12-30 09:57:18,518 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:57:18,518 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:57:48,549 ==================== TRACER ======================
+2021-12-30 09:57:48,550 Channel (server worker num[20]):
+2021-12-30 09:57:48,550 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:57:48,551 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:58:18,582 ==================== TRACER ======================
+2021-12-30 09:58:18,583 Channel (server worker num[20]):
+2021-12-30 09:58:18,584 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:58:18,584 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:58:48,615 ==================== TRACER ======================
+2021-12-30 09:58:48,616 Channel (server worker num[20]):
+2021-12-30 09:58:48,617 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:58:48,617 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:59:18,648 ==================== TRACER ======================
+2021-12-30 09:59:18,649 Channel (server worker num[20]):
+2021-12-30 09:59:18,650 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:59:18,650 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 09:59:48,680 ==================== TRACER ======================
+2021-12-30 09:59:48,681 Channel (server worker num[20]):
+2021-12-30 09:59:48,682 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 09:59:48,683 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 10:00:18,713 ==================== TRACER ======================
+2021-12-30 10:00:18,714 Channel (server worker num[20]):
+2021-12-30 10:00:18,715 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 10:00:18,716 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 10:00:48,731 ==================== TRACER ======================
+2021-12-30 10:00:48,732 Channel (server worker num[20]):
+2021-12-30 10:00:48,733 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 10:00:48,734 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 10:01:18,748 ==================== TRACER ======================
+2021-12-30 10:01:18,749 Channel (server worker num[20]):
+2021-12-30 10:01:18,750 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 10:01:18,751 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 10:01:48,781 ==================== TRACER ======================
+2021-12-30 10:01:48,782 Channel (server worker num[20]):
+2021-12-30 10:01:48,783 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 10:01:48,784 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 10:02:18,814 ==================== TRACER ======================
+2021-12-30 10:02:18,815 Channel (server worker num[20]):
+2021-12-30 10:02:18,816 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 10:02:18,817 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 10:02:48,847 ==================== TRACER ======================
+2021-12-30 10:02:48,848 Channel (server worker num[20]):
+2021-12-30 10:02:48,849 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 10:02:48,850 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 10:03:18,880 ==================== TRACER ======================
+2021-12-30 10:03:18,881 Channel (server worker num[20]):
+2021-12-30 10:03:18,882 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 10:03:18,883 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 10:03:48,892 ==================== TRACER ======================
+2021-12-30 10:03:48,893 Channel (server worker num[20]):
+2021-12-30 10:03:48,894 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 10:03:48,895 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 10:04:18,925 ==================== TRACER ======================
+2021-12-30 10:04:18,926 Channel (server worker num[20]):
+2021-12-30 10:04:18,927 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 10:04:18,928 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 10:04:48,958 ==================== TRACER ======================
+2021-12-30 10:04:48,959 Channel (server worker num[20]):
+2021-12-30 10:04:48,960 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 10:04:48,961 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
[... 121 idle TRACER heartbeat blocks omitted (2021-12-30 10:05:18 through 11:05:22, one every ~30 s; each reports only "Channel (server worker num[20])" with chl0/chl1 size[0/0]) ...]
+2021-12-30 11:05:52,732 ==================== TRACER ======================
+2021-12-30 11:05:52,734 DAGExecutor:
+2021-12-30 11:05:52,734 Query count[1]
+2021-12-30 11:05:52,734 QPS[0.03333333333333333 q/s]
+2021-12-30 11:05:52,734 Succ[0.0]
+2021-12-30 11:05:52,734 Error req[2]
+2021-12-30 11:05:52,735 Latency:
+2021-12-30 11:05:52,735 ave[111.508 ms]
+2021-12-30 11:05:52,735 .50[111.508 ms]
+2021-12-30 11:05:52,735 .60[111.508 ms]
+2021-12-30 11:05:52,735 .70[111.508 ms]
+2021-12-30 11:05:52,736 .80[111.508 ms]
+2021-12-30 11:05:52,736 .90[111.508 ms]
+2021-12-30 11:05:52,736 .95[111.508 ms]
+2021-12-30 11:05:52,736 .99[111.508 ms]
+2021-12-30 11:05:52,736 Channel (server worker num[20]):
+2021-12-30 11:05:52,737 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 11:05:52,738 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 11:05:54,677 ==================== TRACER ======================
+2021-12-30 11:05:54,679 Channel (server worker num[20]):
+2021-12-30 11:05:54,681 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 11:05:54,682 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 11:06:24,712 ==================== TRACER ======================
+2021-12-30 11:06:24,714 Op(ppyolo_mbv3):
+2021-12-30 11:06:24,714 in[3.782 ms]
+2021-12-30 11:06:24,714 prep[61.954 ms]
+2021-12-30 11:06:24,715 midp[1570.405 ms]
+2021-12-30 11:06:24,715 postp[10.504 ms]
+2021-12-30 11:06:24,715 out[1.288 ms]
+2021-12-30 11:06:24,715 idle[0.003076581390141553]
+2021-12-30 11:06:24,715 DAGExecutor:
+2021-12-30 11:06:24,715 Query count[1]
+2021-12-30 11:06:24,716 QPS[0.03333333333333333 q/s]
+2021-12-30 11:06:24,716 Succ[1.0]
+2021-12-30 11:06:24,716 Error req[]
+2021-12-30 11:06:24,716 Latency:
+2021-12-30 11:06:24,716 ave[2632.994 ms]
+2021-12-30 11:06:24,716 .50[2632.994 ms]
+2021-12-30 11:06:24,717 .60[2632.994 ms]
+2021-12-30 11:06:24,717 .70[2632.994 ms]
+2021-12-30 11:06:24,717 .80[2632.994 ms]
+2021-12-30 11:06:24,717 .90[2632.994 ms]
+2021-12-30 11:06:24,717 .95[2632.994 ms]
+2021-12-30 11:06:24,718 .99[2632.994 ms]
+2021-12-30 11:06:24,718 Channel (server worker num[20]):
+2021-12-30 11:06:24,718 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 11:06:24,719 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
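The two kinds of tracer blocks above carry all of this log's signal: idle heartbeats report only the channel queues (size[0/0] means both are empty), while windows that served traffic add Op(ppyolo_mbv3) stage timings (in/prep/midp/postp/out, with midp covering the model step) and DAGExecutor request stats (query count, QPS, success ratio, latency percentiles). A minimal sketch for pulling the stage timings out of such a log — the file name, threshold, and helper are illustrative assumptions, not part of this diff:

```python
# Hypothetical helper (illustrative, not part of this diff): extract
# Op stage timings from tracer lines shaped like
#   2021-12-30 11:06:24,715 midp[1570.405 ms]
import re

TIMING = re.compile(
    r"^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}),\d{3}\s+"
    r"(in|prep|midp|postp|out)\[([\d.]+) ms\]$"
)

def stage_timings(path):
    """Yield (timestamp, stage, milliseconds) for every Op timing line."""
    with open(path) as log:
        for line in log:
            m = TIMING.match(line.strip())
            if m:
                ts, stage, ms = m.groups()
                yield ts, stage, float(ms)

if __name__ == "__main__":
    # Flag tracer windows whose model step (midp) exceeded one second.
    for ts, stage, ms in stage_timings("pipeline.tracer"):  # path assumed
        if stage == "midp" and ms > 1000.0:
            print(f"{ts} slow midp[{ms} ms]")
```

Run against the stats blocks in this log, it would flag the 11:06:24 window (midp[1570.405 ms]) and the 11:24:54 one (midp[1828.419 ms]) while skipping the sub-second windows such as 11:40:35 (midp[792.723 ms]).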
[... 18 idle TRACER heartbeat blocks omitted (11:06:54 through 11:15:25; all channels size[0/0]) ...]
+2021-12-30 11:15:55,340 ==================== TRACER ======================
+2021-12-30 11:15:55,341 DAGExecutor:
+2021-12-30 11:15:55,343 Query count[1]
+2021-12-30 11:15:55,343 QPS[0.03333333333333333 q/s]
+2021-12-30 11:15:55,343 Succ[0.0]
+2021-12-30 11:15:55,344 Error req[1]
+2021-12-30 11:15:55,344 Latency:
+2021-12-30 11:15:55,344 ave[115.746 ms]
+2021-12-30 11:15:55,344 .50[115.746 ms]
+2021-12-30 11:15:55,344 .60[115.746 ms]
+2021-12-30 11:15:55,344 .70[115.746 ms]
+2021-12-30 11:15:55,345 .80[115.746 ms]
+2021-12-30 11:15:55,345 .90[115.746 ms]
+2021-12-30 11:15:55,345 .95[115.746 ms]
+2021-12-30 11:15:55,345 .99[115.746 ms]
+2021-12-30 11:15:55,345 Channel (server worker num[20]):
+2021-12-30 11:15:55,346 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 11:15:55,347 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
[... 17 idle TRACER heartbeat blocks omitted (11:16:25 through 11:24:24; all channels size[0/0]) ...]
+2021-12-30 11:24:54,571 ==================== TRACER ======================
+2021-12-30 11:24:54,572 Op(ppyolo_mbv3):
+2021-12-30 11:24:54,573 in[1233.596 ms]
+2021-12-30 11:24:54,573 prep[79.632 ms]
+2021-12-30 11:24:54,573 midp[1828.419 ms]
+2021-12-30 11:24:54,573 postp[25.206 ms]
+2021-12-30 11:24:54,573 out[1.554 ms]
+2021-12-30 11:24:54,574 idle[0.38983312434292694]
+2021-12-30 11:24:54,574 DAGExecutor:
+2021-12-30 11:24:54,574 Query count[2]
+2021-12-30 11:24:54,574 QPS[0.06666666666666667 q/s]
+2021-12-30 11:24:54,574 Succ[0.5]
+2021-12-30 11:24:54,574 Error req[1]
+2021-12-30 11:24:54,575 Latency:
+2021-12-30 11:24:54,575 ave[1038.919 ms]
+2021-12-30 11:24:54,575 .50[1947.202 ms]
+2021-12-30 11:24:54,575 .60[1947.202 ms]
+2021-12-30 11:24:54,575 .70[1947.202 ms]
+2021-12-30 11:24:54,575 .80[1947.202 ms]
+2021-12-30 11:24:54,576 .90[1947.202 ms]
+2021-12-30 11:24:54,576 .95[1947.202 ms]
+2021-12-30 11:24:54,576 .99[1947.202 ms]
+2021-12-30 11:24:54,576 Channel (server worker num[20]):
+2021-12-30 11:24:54,577 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 11:24:54,578 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
[... 3 idle TRACER heartbeat blocks omitted (11:25:24, then 11:37:49 and 11:38:09 after a ~12-minute gap in tracer output; all channels size[0/0]) ...]
+2021-12-30 11:38:39,404 ==================== TRACER ======================
+2021-12-30 11:38:39,405 DAGExecutor:
+2021-12-30 11:38:39,405 Query count[1]
+2021-12-30 11:38:39,405 QPS[0.03333333333333333 q/s]
+2021-12-30 11:38:39,406 Succ[0.0]
+2021-12-30 11:38:39,406 Error req[0]
+2021-12-30 11:38:39,406 Latency:
+2021-12-30 11:38:39,406 ave[2798.136 ms]
+2021-12-30 11:38:39,406 .50[2798.136 ms]
+2021-12-30 11:38:39,406 .60[2798.136 ms]
+2021-12-30 11:38:39,407 .70[2798.136 ms]
+2021-12-30 11:38:39,407 .80[2798.136 ms]
+2021-12-30 11:38:39,407 .90[2798.136 ms]
+2021-12-30 11:38:39,407 .95[2798.136 ms]
+2021-12-30 11:38:39,407 .99[2798.136 ms]
+2021-12-30 11:38:39,407 Channel (server worker num[20]):
+2021-12-30 11:38:39,408 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 11:38:39,409 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
[... 3 idle TRACER heartbeat blocks omitted (11:39:09, 11:39:39, 11:40:05; all channels size[0/0]) ...]
+2021-12-30 11:40:35,710 ==================== TRACER ======================
+2021-12-30 11:40:35,712 Op(ppyolo_mbv3):
+2021-12-30 11:40:35,712 in[1703.2785 ms]
+2021-12-30 11:40:35,712 prep[66.81 ms]
+2021-12-30 11:40:35,713 midp[792.723 ms]
+2021-12-30 11:40:35,713 postp[10.012 ms]
+2021-12-30 11:40:35,713 out[1.1055 ms]
+2021-12-30 11:40:35,713 idle[0.6621721111965404]
+2021-12-30 11:40:35,713 DAGExecutor:
+2021-12-30 11:40:35,713 Query count[2]
+2021-12-30 11:40:35,714 QPS[0.06666666666666667 q/s]
+2021-12-30 11:40:35,714 Succ[1.0]
+2021-12-30 11:40:35,714 Error req[]
+2021-12-30 11:40:35,714 Latency:
+2021-12-30 11:40:35,714 ave[880.886 ms]
+2021-12-30 11:40:35,714 .50[1658.566 ms]
+2021-12-30 11:40:35,715 .60[1658.566 ms]
+2021-12-30 11:40:35,715 .70[1658.566 ms]
+2021-12-30 11:40:35,715 .80[1658.566 ms]
+2021-12-30 11:40:35,715 .90[1658.566 ms]
+2021-12-30 11:40:35,715 .95[1658.566 ms]
+2021-12-30 11:40:35,715 .99[1658.566 ms]
+2021-12-30 11:40:35,716 Channel (server worker num[20]):
+2021-12-30 11:40:35,716 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 11:40:35,717 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 11:41:05,746 ==================== TRACER ======================
+2021-12-30 11:41:05,747 Op(ppyolo_mbv3):
+2021-12-30 11:41:05,748 in[45842.715 ms]
+2021-12-30 11:41:05,748 prep[76.967 ms]
+2021-12-30 11:41:05,748 midp[18.287 ms]
+2021-12-30 11:41:05,748 postp[9.692 ms]
+2021-12-30 11:41:05,748 out[1.296 ms]
+2021-12-30 11:41:05,749 idle[0.9977160308557167]
+2021-12-30 11:41:05,749 DAGExecutor:
+2021-12-30 11:41:05,749 Query count[1]
+2021-12-30 11:41:05,749 QPS[0.03333333333333333 q/s]
+2021-12-30 11:41:05,749 Succ[1.0]
+2021-12-30 11:41:05,750 Error req[]
+2021-12-30 11:41:05,750 Latency:
+2021-12-30 11:41:05,750 ave[118.137 ms]
+2021-12-30 11:41:05,750 .50[118.137 ms]
+2021-12-30 11:41:05,750 .60[118.137 ms]
+2021-12-30 11:41:05,750 .70[118.137 ms]
+2021-12-30 11:41:05,751 .80[118.137 ms]
+2021-12-30 11:41:05,751 .90[118.137 ms]
+2021-12-30 11:41:05,751 .95[118.137 ms]
+2021-12-30 11:41:05,751 .99[118.137 ms]
+2021-12-30 11:41:05,751 Channel (server worker num[20]):
+2021-12-30 11:41:05,752 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 11:41:05,753 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
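Across the Op blocks in this log, the reported idle fraction is consistent with idle = (in + out) / (in + prep + midp + postp + out), i.e. the share of the window the op worker spent waiting on its input/output channels rather than computing. For the 11:41:05 block: (45842.715 + 1.296) / (45842.715 + 76.967 + 18.287 + 9.692 + 1.296) = 45844.011 / 45948.957 ≈ 0.997716, matching idle[0.9977160308557167]; likewise the 11:40:35 block gives (1703.2785 + 1.1055) / 2573.929 ≈ 0.662172, matching idle[0.6621721111965404].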
+2021-12-30 11:41:27,033 ==================== TRACER ======================
+2021-12-30 11:41:27,035 Channel (server worker num[20]):
+2021-12-30 11:41:27,037 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 11:41:27,038 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2021-12-30 11:41:57,060 ==================== TRACER ======================
+2021-12-30 11:41:57,062 Op(ppyolo_mbv3):
+2021-12-30 11:41:57,062 in[1008.0535 ms]
+2021-12-30 11:41:57,063 prep[61.519 ms]
+2021-12-30 11:41:57,063 midp[796.019 ms]
+2021-12-30 11:41:57,063 postp[10.4375 ms]
+2021-12-30 11:41:57,063 out[1.295 ms]
+2021-12-30 11:41:57,063 idle[0.537652797279532]
+2021-12-30 11:41:57,063 DAGExecutor:
+2021-12-30 11:41:57,064 Query count[2]
+2021-12-30 11:41:57,064 QPS[0.06666666666666667 q/s]
+2021-12-30 11:41:57,064 Succ[1.0]
+2021-12-30 11:41:57,064 Error req[]
+2021-12-30 11:41:57,064 Latency:
+2021-12-30 11:41:57,065 ave[1179.855 ms]
+2021-12-30 11:41:57,065 .50[2258.924 ms]
+2021-12-30 11:41:57,065 .60[2258.924 ms]
+2021-12-30 11:41:57,065 .70[2258.924 ms]
+2021-12-30 11:41:57,065 .80[2258.924 ms]
+2021-12-30 11:41:57,065 .90[2258.924 ms]
+2021-12-30 11:41:57,066 .95[2258.924 ms]
+2021-12-30 11:41:57,066 .99[2258.924 ms]
+2021-12-30 11:41:57,066 Channel (server worker num[20]):
+2021-12-30 11:41:57,067 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2021-12-30 11:41:57,067 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
[... 17 idle TRACER heartbeat blocks omitted (11:42:27 through 11:50:27; all channels size[0/0]) ...]
+2022-02-14 09:24:33,177 ==================== TRACER ======================
+2022-02-14 09:24:33,179 Channel (server worker num[20]):
+2022-02-14 09:24:33,182 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-14 09:24:33,182 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-14 09:24:42,822 ==================== TRACER ======================
+2022-02-14 09:24:42,824 Channel (server worker num[20]):
+2022-02-14 09:24:42,827 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-14 09:24:42,827 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-14 09:26:03,784 ==================== TRACER ======================
+2022-02-14 09:26:03,786 Channel (server worker num[20]):
+2022-02-14 09:26:03,789 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-14 09:26:03,789 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 16:56:51,916 ==================== TRACER ======================
+2022-02-16 16:56:51,917 Channel (server worker num[20]):
+2022-02-16 16:56:51,918 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 16:56:51,918 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:05:23,231 ==================== TRACER ======================
+2022-02-16 17:05:23,232 Channel (server worker num[20]):
+2022-02-16 17:05:23,233 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:05:23,234 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:05:53,260 ==================== TRACER ======================
+2022-02-16 17:05:53,261 Op(ppyolo_mbv3):
+2022-02-16 17:05:53,261 in[10785.568 ms]
+2022-02-16 17:05:53,261 prep[48.4655 ms]
+2022-02-16 17:05:53,261 midp[1298.1825 ms]
+2022-02-16 17:05:53,261 postp[9.903 ms]
+2022-02-16 17:05:53,261 out[0.8555 ms]
+2022-02-16 17:05:53,261 idle[0.888285114985624]
+2022-02-16 17:05:53,261 DAGExecutor:
+2022-02-16 17:05:53,261 Query count[2]
+2022-02-16 17:05:53,262 QPS[0.06666666666666667 q/s]
+2022-02-16 17:05:53,262 Succ[1.0]
+2022-02-16 17:05:53,262 Error req[]
+2022-02-16 17:05:53,262 Latency:
+2022-02-16 17:05:53,262 ave[1365.0625 ms]
+2022-02-16 17:05:53,262 .50[2649.873 ms]
+2022-02-16 17:05:53,262 .60[2649.873 ms]
+2022-02-16 17:05:53,262 .70[2649.873 ms]
+2022-02-16 17:05:53,262 .80[2649.873 ms]
+2022-02-16 17:05:53,262 .90[2649.873 ms]
+2022-02-16 17:05:53,262 .95[2649.873 ms]
+2022-02-16 17:05:53,262 .99[2649.873 ms]
+2022-02-16 17:05:53,262 Channel (server worker num[20]):
+2022-02-16 17:05:53,262 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:05:53,263 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:06:23,264 ==================== TRACER ======================
+2022-02-16 17:06:23,265 Channel (server worker num[20]):
+2022-02-16 17:06:23,265 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:06:23,265 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:06:53,294 ==================== TRACER ======================
+2022-02-16 17:06:53,294 Channel (server worker num[20]):
+2022-02-16 17:06:53,295 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:06:53,295 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:07:23,303 ==================== TRACER ======================
+2022-02-16 17:07:23,304 Op(ppyolo_mbv3):
+2022-02-16 17:07:23,304 in[69773.431 ms]
+2022-02-16 17:07:23,304 prep[41.191 ms]
+2022-02-16 17:07:23,304 midp[11.872 ms]
+2022-02-16 17:07:23,305 postp[9.658 ms]
+2022-02-16 17:07:23,305 out[0.679 ms]
+2022-02-16 17:07:23,305 idle[0.9991018922379225]
+2022-02-16 17:07:23,305 DAGExecutor:
+2022-02-16 17:07:23,305 Query count[1]
+2022-02-16 17:07:23,305 QPS[0.03333333333333333 q/s]
+2022-02-16 17:07:23,305 Succ[1.0]
+2022-02-16 17:07:23,305 Error req[]
+2022-02-16 17:07:23,305 Latency:
+2022-02-16 17:07:23,305 ave[69.518 ms]
+2022-02-16 17:07:23,305 .50[69.518 ms]
+2022-02-16 17:07:23,305 .60[69.518 ms]
+2022-02-16 17:07:23,305 .70[69.518 ms]
+2022-02-16 17:07:23,305 .80[69.518 ms]
+2022-02-16 17:07:23,305 .90[69.518 ms]
+2022-02-16 17:07:23,305 .95[69.518 ms]
+2022-02-16 17:07:23,305 .99[69.518 ms]
+2022-02-16 17:07:23,305 Channel (server worker num[20]):
+2022-02-16 17:07:23,306 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:07:23,306 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:09:22,123 ==================== TRACER ======================
+2022-02-16 17:09:22,124 Channel (server worker num[20]):
+2022-02-16 17:09:22,125 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:09:22,126 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:09:52,156 ==================== TRACER ======================
+2022-02-16 17:09:52,157 Channel (server worker num[20]):
+2022-02-16 17:09:52,157 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:09:52,157 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:10:22,184 ==================== TRACER ======================
+2022-02-16 17:10:22,185 Op(ppyolo_mbv3):
+2022-02-16 17:10:22,185 in[35440.544 ms]
+2022-02-16 17:10:22,185 prep[42.793 ms]
+2022-02-16 17:10:22,185 midp[2504.427 ms]
+2022-02-16 17:10:22,185 postp[10.631 ms]
+2022-02-16 17:10:22,185 out[0.959 ms]
+2022-02-16 17:10:22,186 idle[0.9326869872577308]
+2022-02-16 17:10:22,186 DAGExecutor:
+2022-02-16 17:10:22,186 Query count[1]
+2022-02-16 17:10:22,186 QPS[0.03333333333333333 q/s]
+2022-02-16 17:10:22,186 Succ[1.0]
+2022-02-16 17:10:22,186 Error req[]
+2022-02-16 17:10:22,186 Latency:
+2022-02-16 17:10:22,186 ave[2566.559 ms]
+2022-02-16 17:10:22,186 .50[2566.559 ms]
+2022-02-16 17:10:22,186 .60[2566.559 ms]
+2022-02-16 17:10:22,186 .70[2566.559 ms]
+2022-02-16 17:10:22,186 .80[2566.559 ms]
+2022-02-16 17:10:22,186 .90[2566.559 ms]
+2022-02-16 17:10:22,186 .95[2566.559 ms]
+2022-02-16 17:10:22,186 .99[2566.559 ms]
+2022-02-16 17:10:22,186 Channel (server worker num[20]):
+2022-02-16 17:10:22,187 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:10:22,187 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
[... 8 idle tracer heartbeats, 2022-02-16 17:10:52 through 17:14:22, omitted ...]
+2022-02-16 17:14:52,404 ==================== TRACER ======================
+2022-02-16 17:14:52,405 Op(ppyolo_mbv3):
+2022-02-16 17:14:52,405 in[289118.996 ms]
+2022-02-16 17:14:52,405 prep[46.16 ms]
+2022-02-16 17:14:52,405 midp[11.854 ms]
+2022-02-16 17:14:52,405 postp[9.602 ms]
+2022-02-16 17:14:52,405 out[0.799 ms]
+2022-02-16 17:14:52,405 idle[0.9997661862258589]
+2022-02-16 17:14:52,405 DAGExecutor:
+2022-02-16 17:14:52,405 Query count[1]
+2022-02-16 17:14:52,406 QPS[0.03333333333333333 q/s]
+2022-02-16 17:14:52,406 Succ[1.0]
+2022-02-16 17:14:52,406 Error req[]
+2022-02-16 17:14:52,406 Latency:
+2022-02-16 17:14:52,406 ave[76.35 ms]
+2022-02-16 17:14:52,406 .50[76.35 ms]
+2022-02-16 17:14:52,406 .60[76.35 ms]
+2022-02-16 17:14:52,406 .70[76.35 ms]
+2022-02-16 17:14:52,406 .80[76.35 ms]
+2022-02-16 17:14:52,406 .90[76.35 ms]
+2022-02-16 17:14:52,406 .95[76.35 ms]
+2022-02-16 17:14:52,406 .99[76.35 ms]
+2022-02-16 17:14:52,406 Channel (server worker num[20]):
+2022-02-16 17:14:52,406 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:14:52,407 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:15:22,428 ==================== TRACER ======================
+2022-02-16 17:15:22,429 Op(ppyolo_mbv3):
+2022-02-16 17:15:22,429 in[14226.853 ms]
+2022-02-16 17:15:22,430 prep[40.298 ms]
+2022-02-16 17:15:22,430 midp[11.486 ms]
+2022-02-16 17:15:22,430 postp[9.523 ms]
+2022-02-16 17:15:22,430 out[0.712 ms]
+2022-02-16 17:15:22,430 idle[0.9957094583813194]
+2022-02-16 17:15:22,430 DAGExecutor:
+2022-02-16 17:15:22,430 Query count[1]
+2022-02-16 17:15:22,430 QPS[0.03333333333333333 q/s]
+2022-02-16 17:15:22,430 Succ[1.0]
+2022-02-16 17:15:22,430 Error req[]
+2022-02-16 17:15:22,430 Latency:
+2022-02-16 17:15:22,430 ave[68.343 ms]
+2022-02-16 17:15:22,430 .50[68.343 ms]
+2022-02-16 17:15:22,430 .60[68.343 ms]
+2022-02-16 17:15:22,430 .70[68.343 ms]
+2022-02-16 17:15:22,430 .80[68.343 ms]
+2022-02-16 17:15:22,430 .90[68.343 ms]
+2022-02-16 17:15:22,430 .95[68.343 ms]
+2022-02-16 17:15:22,430 .99[68.343 ms]
+2022-02-16 17:15:22,430 Channel (server worker num[20]):
+2022-02-16 17:15:22,431 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:15:22,431 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
[... 12 idle tracer heartbeats, 2022-02-16 17:15:52 through 17:21:22, omitted ...]
+2022-02-16 17:21:52,795 ==================== TRACER ======================
+2022-02-16 17:21:52,795 Op(ppyolo_mbv3):
+2022-02-16 17:21:52,796 in[378619.8 ms]
+2022-02-16 17:21:52,796 prep[40.567 ms]
+2022-02-16 17:21:52,796 midp[11.664 ms]
+2022-02-16 17:21:52,796 postp[9.171 ms]
+2022-02-16 17:21:52,796 out[0.723 ms]
+2022-02-16 17:21:52,796 idle[0.9998378533646675]
+2022-02-16 17:21:52,796 DAGExecutor:
+2022-02-16 17:21:52,796 Query count[1]
+2022-02-16 17:21:52,796 QPS[0.03333333333333333 q/s]
+2022-02-16 17:21:52,796 Succ[1.0]
+2022-02-16 17:21:52,796 Error req[]
+2022-02-16 17:21:52,796 Latency:
+2022-02-16 17:21:52,796 ave[67.215 ms]
+2022-02-16 17:21:52,796 .50[67.215 ms]
+2022-02-16 17:21:52,796 .60[67.215 ms]
+2022-02-16 17:21:52,796 .70[67.215 ms]
+2022-02-16 17:21:52,796 .80[67.215 ms]
+2022-02-16 17:21:52,796 .90[67.215 ms]
+2022-02-16 17:21:52,796 .95[67.215 ms]
+2022-02-16 17:21:52,796 .99[67.215 ms]
+2022-02-16 17:21:52,796 Channel (server worker num[20]):
+2022-02-16 17:21:52,797 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:21:52,797 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:22:22,827 ==================== TRACER ======================
+2022-02-16 17:22:22,828 Op(ppyolo_mbv3):
+2022-02-16 17:22:22,828 in[28828.316 ms]
+2022-02-16 17:22:22,828 prep[40.625 ms]
+2022-02-16 17:22:22,828 midp[11.704 ms]
+2022-02-16 17:22:22,828 postp[9.511 ms]
+2022-02-16 17:22:22,828 out[0.838 ms]
+2022-02-16 17:22:22,828 idle[0.9978595405890154]
+2022-02-16 17:22:22,828 DAGExecutor:
+2022-02-16 17:22:22,828 Query count[1]
+2022-02-16 17:22:22,828 QPS[0.03333333333333333 q/s]
+2022-02-16 17:22:22,828 Succ[1.0]
+2022-02-16 17:22:22,828 Error req[]
+2022-02-16 17:22:22,828 Latency:
+2022-02-16 17:22:22,829 ave[69.462 ms]
+2022-02-16 17:22:22,829 .50[69.462 ms]
+2022-02-16 17:22:22,829 .60[69.462 ms]
+2022-02-16 17:22:22,829 .70[69.462 ms]
+2022-02-16 17:22:22,829 .80[69.462 ms]
+2022-02-16 17:22:22,829 .90[69.462 ms]
+2022-02-16 17:22:22,829 .95[69.462 ms]
+2022-02-16 17:22:22,829 .99[69.462 ms]
+2022-02-16 17:22:22,829 Channel (server worker num[20]):
+2022-02-16 17:22:22,829 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:22:22,829 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:22:52,860 ==================== TRACER ======================
+2022-02-16 17:22:52,860 Op(ppyolo_mbv3):
+2022-02-16 17:22:52,860 in[53789.073 ms]
+2022-02-16 17:22:52,861 prep[41.018 ms]
+2022-02-16 17:22:52,861 midp[11.874 ms]
+2022-02-16 17:22:52,861 postp[9.638 ms]
+2022-02-16 17:22:52,861 out[0.767 ms]
+2022-02-16 17:22:52,861 idle[0.9988388626164456]
+2022-02-16 17:22:52,861 DAGExecutor:
+2022-02-16 17:22:52,861 Query count[1]
+2022-02-16 17:22:52,861 QPS[0.03333333333333333 q/s]
+2022-02-16 17:22:52,861 Succ[1.0]
+2022-02-16 17:22:52,861 Error req[]
+2022-02-16 17:22:52,861 Latency:
+2022-02-16 17:22:52,861 ave[69.775 ms]
+2022-02-16 17:22:52,861 .50[69.775 ms]
+2022-02-16 17:22:52,861 .60[69.775 ms]
+2022-02-16 17:22:52,861 .70[69.775 ms]
+2022-02-16 17:22:52,861 .80[69.775 ms]
+2022-02-16 17:22:52,861 .90[69.775 ms]
+2022-02-16 17:22:52,861 .95[69.775 ms]
+2022-02-16 17:22:52,861 .99[69.775 ms]
+2022-02-16 17:22:52,861 Channel (server worker num[20]):
+2022-02-16 17:22:52,862 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:22:52,862 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:23:22,869 ==================== TRACER ======================
+2022-02-16 17:23:22,870 Op(ppyolo_mbv3):
+2022-02-16 17:23:22,870 in[16002.513 ms]
+2022-02-16 17:23:22,870 prep[40.564 ms]
+2022-02-16 17:23:22,870 midp[11.608 ms]
+2022-02-16 17:23:22,870 postp[9.549 ms]
+2022-02-16 17:23:22,870 out[0.796 ms]
+2022-02-16 17:23:22,870 idle[0.9961580526149033]
+2022-02-16 17:23:22,870 DAGExecutor:
+2022-02-16 17:23:22,870 Query count[1]
+2022-02-16 17:23:22,871 QPS[0.03333333333333333 q/s]
+2022-02-16 17:23:22,871 Succ[1.0]
+2022-02-16 17:23:22,871 Error req[]
+2022-02-16 17:23:22,871 Latency:
+2022-02-16 17:23:22,871 ave[68.978 ms]
+2022-02-16 17:23:22,871 .50[68.978 ms]
+2022-02-16 17:23:22,871 .60[68.978 ms]
+2022-02-16 17:23:22,871 .70[68.978 ms]
+2022-02-16 17:23:22,871 .80[68.978 ms]
+2022-02-16 17:23:22,871 .90[68.978 ms]
+2022-02-16 17:23:22,871 .95[68.978 ms]
+2022-02-16 17:23:22,871 .99[68.978 ms]
+2022-02-16 17:23:22,871 Channel (server worker num[20]):
+2022-02-16 17:23:22,871 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:23:22,872 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:23:52,895 ==================== TRACER ======================
+2022-02-16 17:23:52,895 Op(ppyolo_mbv3):
+2022-02-16 17:23:52,896 in[26647.47 ms]
+2022-02-16 17:23:52,896 prep[41.165 ms]
+2022-02-16 17:23:52,896 midp[11.559 ms]
+2022-02-16 17:23:52,896 postp[9.357 ms]
+2022-02-16 17:23:52,896 out[0.736 ms]
+2022-02-16 17:23:52,896 idle[0.9976757643974399]
+2022-02-16 17:23:52,896 DAGExecutor:
+2022-02-16 17:23:52,896 Query count[1]
+2022-02-16 17:23:52,896 QPS[0.03333333333333333 q/s]
+2022-02-16 17:23:52,896 Succ[1.0]
+2022-02-16 17:23:52,896 Error req[]
+2022-02-16 17:23:52,896 Latency:
+2022-02-16 17:23:52,896 ave[69.043 ms]
+2022-02-16 17:23:52,896 .50[69.043 ms]
+2022-02-16 17:23:52,896 .60[69.043 ms]
+2022-02-16 17:23:52,896 .70[69.043 ms]
+2022-02-16 17:23:52,896 .80[69.043 ms]
+2022-02-16 17:23:52,896 .90[69.043 ms]
+2022-02-16 17:23:52,896 .95[69.043 ms]
+2022-02-16 17:23:52,896 .99[69.043 ms]
+2022-02-16 17:23:52,896 Channel (server worker num[20]):
+2022-02-16 17:23:52,897 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:23:52,897 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:24:22,925 ==================== TRACER ======================
+2022-02-16 17:24:22,926 Op(ppyolo_mbv3):
+2022-02-16 17:24:22,926 in[42124.841 ms]
+2022-02-16 17:24:22,926 prep[42.925 ms]
+2022-02-16 17:24:22,926 midp[12.13 ms]
+2022-02-16 17:24:22,926 postp[9.767 ms]
+2022-02-16 17:24:22,926 out[0.895 ms]
+2022-02-16 17:24:22,926 idle[0.9984635898866282]
+2022-02-16 17:24:22,926 DAGExecutor:
+2022-02-16 17:24:22,926 Query count[1]
+2022-02-16 17:24:22,926 QPS[0.03333333333333333 q/s]
+2022-02-16 17:24:22,926 Succ[1.0]
+2022-02-16 17:24:22,926 Error req[]
+2022-02-16 17:24:22,926 Latency:
+2022-02-16 17:24:22,927 ave[73.248 ms]
+2022-02-16 17:24:22,927 .50[73.248 ms]
+2022-02-16 17:24:22,927 .60[73.248 ms]
+2022-02-16 17:24:22,927 .70[73.248 ms]
+2022-02-16 17:24:22,927 .80[73.248 ms]
+2022-02-16 17:24:22,927 .90[73.248 ms]
+2022-02-16 17:24:22,927 .95[73.248 ms]
+2022-02-16 17:24:22,927 .99[73.248 ms]
+2022-02-16 17:24:22,927 Channel (server worker num[20]):
+2022-02-16 17:24:22,927 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:24:22,927 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:24:52,956 ==================== TRACER ======================
+2022-02-16 17:24:52,957 Op(ppyolo_mbv3):
+2022-02-16 17:24:52,957 in[15533.448 ms]
+2022-02-16 17:24:52,957 prep[42.1205 ms]
+2022-02-16 17:24:52,957 midp[11.651 ms]
+2022-02-16 17:24:52,957 postp[9.449 ms]
+2022-02-16 17:24:52,957 out[0.808 ms]
+2022-02-16 17:24:52,957 idle[0.9959467481807073]
+2022-02-16 17:24:52,957 DAGExecutor:
+2022-02-16 17:24:52,957 Query count[2]
+2022-02-16 17:24:52,957 QPS[0.06666666666666667 q/s]
+2022-02-16 17:24:52,957 Succ[1.0]
+2022-02-16 17:24:52,957 Error req[]
+2022-02-16 17:24:52,957 Latency:
+2022-02-16 17:24:52,957 ave[69.7875 ms]
+2022-02-16 17:24:52,957 .50[69.917 ms]
+2022-02-16 17:24:52,957 .60[69.917 ms]
+2022-02-16 17:24:52,958 .70[69.917 ms]
+2022-02-16 17:24:52,958 .80[69.917 ms]
+2022-02-16 17:24:52,958 .90[69.917 ms]
+2022-02-16 17:24:52,958 .95[69.917 ms]
+2022-02-16 17:24:52,958 .99[69.917 ms]
+2022-02-16 17:24:52,958 Channel (server worker num[20]):
+2022-02-16 17:24:52,958 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:24:52,958 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-16 17:25:22,988 ==================== TRACER ======================
+2022-02-16 17:25:22,989 Op(ppyolo_mbv3):
+2022-02-16 17:25:22,989 in[14139.5015 ms]
+2022-02-16 17:25:22,990 prep[40.245 ms]
+2022-02-16 17:25:22,990 midp[11.6935 ms]
+2022-02-16 17:25:22,990 postp[9.9075 ms]
+2022-02-16 17:25:22,990 out[0.7675 ms]
+2022-02-16 17:25:22,990 idle[0.9956452964928111]
+2022-02-16 17:25:22,990 DAGExecutor:
+2022-02-16 17:25:22,990 Query count[2]
+2022-02-16 17:25:22,990 QPS[0.06666666666666667 q/s]
+2022-02-16 17:25:22,990 Succ[1.0]
+2022-02-16 17:25:22,990 Error req[]
+2022-02-16 17:25:22,990 Latency:
+2022-02-16 17:25:22,990 ave[68.187 ms]
+2022-02-16 17:25:22,990 .50[68.877 ms]
+2022-02-16 17:25:22,990 .60[68.877 ms]
+2022-02-16 17:25:22,990 .70[68.877 ms]
+2022-02-16 17:25:22,990 .80[68.877 ms]
+2022-02-16 17:25:22,990 .90[68.877 ms]
+2022-02-16 17:25:22,990 .95[68.877 ms]
+2022-02-16 17:25:22,990 .99[68.877 ms]
+2022-02-16 17:25:22,990 Channel (server worker num[20]):
+2022-02-16 17:25:22,991 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-16 17:25:22,991 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
[... 19 idle tracer heartbeats, 2022-02-16 17:25:53 through 17:34:53, omitted ...]
+2022-02-22 13:07:33,057 ==================== TRACER ======================
+2022-02-22 13:07:33,058 Channel (server worker num[20]):
+2022-02-22 13:07:33,059 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-22 13:07:33,059 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-22 13:08:48,169 ==================== TRACER ======================
+2022-02-22 13:08:48,171 Channel (server worker num[20]):
+2022-02-22 13:08:48,172 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-22 13:08:48,172 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
+2022-02-22 13:09:18,202 ==================== TRACER ======================
+2022-02-22 13:09:18,203 Op(ppyolo_mbv3):
+2022-02-22 13:09:18,204 in[10806.135 ms]
+2022-02-22 13:09:18,204 prep[44.3175 ms]
+2022-02-22 13:09:18,204 midp[959.97 ms]
+2022-02-22 13:09:18,204 postp[1.931 ms]
+2022-02-22 13:09:18,204 out[0.896 ms]
+2022-02-22 13:09:18,204 idle[0.9148228859468345]
+2022-02-22 13:09:18,204 DAGExecutor:
+2022-02-22 13:09:18,204 Query count[2]
+2022-02-22 13:09:18,204 QPS[0.06666666666666667 q/s]
+2022-02-22 13:09:18,204 Succ[1.0]
+2022-02-22 13:09:18,204 Error req[]
+2022-02-22 13:09:18,204 Latency:
+2022-02-22 13:09:18,204 ave[1015.3919999999999 ms]
+2022-02-22 13:09:18,204 .50[1966.27 ms]
+2022-02-22 13:09:18,204 .60[1966.27 ms]
+2022-02-22 13:09:18,204 .70[1966.27 ms]
+2022-02-22 13:09:18,204 .80[1966.27 ms]
+2022-02-22 13:09:18,204 .90[1966.27 ms]
+2022-02-22 13:09:18,204 .95[1966.27 ms]
+2022-02-22 13:09:18,204 .99[1966.27 ms]
+2022-02-22 13:09:18,204 Channel (server worker num[20]):
+2022-02-22 13:09:18,205 chl0(In: ['@DAGExecutor'], Out: ['ppyolo_mbv3']) size[0/0]
+2022-02-22 13:09:18,205 chl1(In: ['ppyolo_mbv3'], Out: ['@DAGExecutor']) size[0/0]
[... 22 idle tracer heartbeats, 2022-02-22 13:09:48 through 13:20:18, omitted ...]
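
Taken together, the tracer entries in this log follow one fixed format: every tracer interval (interval_s: 30 in the config below) the server prints either a bare channel-status heartbeat or, when queries arrived in that window, an Op(ppyolo_mbv3) block with per-phase timings (prep, midp, postp, out, where midp is the model inference itself) plus DAGExecutor query counts and latency percentiles. The pattern visible above, midp around 800-2500 ms on the first query after a long idle gap and around 12 ms afterwards, is consistent with first-request warm-up. Below is a minimal sketch for mining the inference timings out of such a log; the log path is an assumption, and the line format is copied from the entries above:

import re

midp_ms = []
with open("PipelineServingLogs/pipeline.tracer") as f:  # assumed log location
    for line in f:
        m = re.search(r"midp\[([\d.]+) ms\]", line)  # inference-phase timing
        if m:
            midp_ms.append(float(m.group(1)))

if midp_ms:
    print("samples:", len(midp_ms),
          "mean midp:", sum(midp_ms) / len(midp_ms), "ms")
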
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/ProcessInfo.json b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/ProcessInfo.json
new file mode 100644
index 000000000..300ca0031
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/ProcessInfo.json
@@ -0,0 +1 @@
+[{"pid": 8611, "port": [9999, 2009], "model": "pipline", "start_time": 1645002562.0576003}]
\ No newline at end of file
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/__pycache__/picodet_postprocess.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/__pycache__/picodet_postprocess.cpython-37.pyc
new file mode 100644
index 000000000..d68cc4357
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/__pycache__/picodet_postprocess.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/__pycache__/preprocess.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/__pycache__/preprocess.cpython-37.pyc
new file mode 100644
index 000000000..d6993735d
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/__pycache__/preprocess.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/config.yml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/config.yml
new file mode 100644
index 000000000..9acb26b07
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/config.yml
@@ -0,0 +1,25 @@
+dag:
+ is_thread_op: false
+ tracer:
+ interval_s: 30
+http_port: 2009
+op:
+ ppyolo_mbv3:
+ concurrency: 1
+
+ local_service_conf:
+ client_type: local_predictor
+ device_type: 2
+ devices: '0'
+ fetch_list:
+ - transpose_1.tmp_0
+ - transpose_2.tmp_0
+ - transpose_3.tmp_0
+ - transpose_4.tmp_0
+ - transpose_5.tmp_0
+ - transpose_6.tmp_0
+ - transpose_7.tmp_0
+ - transpose_0.tmp_0
+ model_config: serving_server/
+rpc_port: 9999
+worker_num: 20
\ No newline at end of file
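
This config.yml is the Pipeline Serving deployment config for the model: rpc_port: 9999 and http_port: 2009 match the ports recorded in ProcessInfo.json above, the eight transpose_*.tmp_0 entries in fetch_list are the PicoDet score and box heads, and the op key ppyolo_mbv3 is the name the tracer log and the HTTP client URL refer to (apparently carried over from an earlier PP-YOLO setup even though this directory serves PicoDet). The server script that loads this config is not included in this diff; the following is a minimal sketch of what such a script typically looks like with the Paddle Serving pipeline WebService API, where the class names and the method bodies are illustrative assumptions:

from paddle_serving_server.web_service import WebService, Op

class PPYoloMbv3Op(Op):
    def preprocess(self, input_dicts, data_id, log_id):
        ...  # decode the base64 "image" value, run the Preprocess chain

    def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
        ...  # decode the transpose_*.tmp_0 heads into boxes

class PPYoloMbv3Service(WebService):
    def get_pipeline_response(self, read_op):
        return PPYoloMbv3Op(name="ppyolo_mbv3", input_ops=[read_op])

service = PPYoloMbv3Service(name="ppyolo_mbv3")
service.prepare_pipeline_config("config.yml")  # ports, device, fetch_list above
service.run_service()
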
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/infer_cfg.yml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/infer_cfg.yml
new file mode 100644
index 000000000..e29f9298f
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/infer_cfg.yml
@@ -0,0 +1,118 @@
+mode: fluid
+draw_threshold: 0.5
+metric: COCO
+use_dynamic_shape: false
+arch: PicoDet
+min_subgraph_size: 3
+Preprocess:
+- interp: 2
+ keep_ratio: false
+ target_size:
+ - 640
+ - 640
+ type: Resize
+- is_scale: true
+ mean:
+ - 0.485
+ - 0.456
+ - 0.406
+ std:
+ - 0.229
+ - 0.224
+ - 0.225
+ type: NormalizeImage
+- type: Permute
+- stride: 32
+ type: PadStride
+label_list:
+- person
+- bicycle
+- car
+- motorcycle
+- airplane
+- bus
+- train
+- truck
+- boat
+- traffic light
+- fire hydrant
+- stop sign
+- parking meter
+- bench
+- bird
+- cat
+- dog
+- horse
+- sheep
+- cow
+- elephant
+- bear
+- zebra
+- giraffe
+- backpack
+- umbrella
+- handbag
+- tie
+- suitcase
+- frisbee
+- skis
+- snowboard
+- sports ball
+- kite
+- baseball bat
+- baseball glove
+- skateboard
+- surfboard
+- tennis racket
+- bottle
+- wine glass
+- cup
+- fork
+- knife
+- spoon
+- bowl
+- banana
+- apple
+- sandwich
+- orange
+- broccoli
+- carrot
+- hot dog
+- pizza
+- donut
+- cake
+- chair
+- couch
+- potted plant
+- bed
+- dining table
+- toilet
+- tv
+- laptop
+- mouse
+- remote
+- keyboard
+- cell phone
+- microwave
+- oven
+- toaster
+- sink
+- refrigerator
+- book
+- clock
+- vase
+- scissors
+- teddy bear
+- hair drier
+- toothbrush
+NMS:
+ keep_top_k: 100
+ name: MultiClassNMS
+ nms_threshold: 0.5
+ nms_top_k: 1000
+ score_threshold: 0.3
+fpn_stride:
+- 8
+- 16
+- 32
+- 64
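
The Preprocess list above maps one-to-one onto the operator classes defined in preprocess.py later in this diff (Resize, NormalizeImage, Permute, PadStride). A minimal sketch of running that chain on a single image, assuming preprocess.py is importable and a test.jpg exists:

from preprocess import decode_image, Resize, NormalizeImage, Permute, PadStride

# mirror the op parameters declared in infer_cfg.yml above
ops = [
    Resize(target_size=[640, 640], keep_ratio=False, interp=2),
    NormalizeImage(mean=[0.485, 0.456, 0.406],
                   std=[0.229, 0.224, 0.225], is_scale=True),
    Permute(),
    PadStride(stride=32),
]

im, im_info = decode_image("test.jpg", {})  # hypothetical input image
for op in ops:
    im, im_info = op(im, im_info)
print(im.shape, im_info["scale_factor"])  # CHW array plus the resize scales
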
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/picodet_postprocess.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/picodet_postprocess.py
new file mode 100644
index 000000000..8fd553d87
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/picodet_postprocess.py
@@ -0,0 +1,231 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+from scipy.special import softmax
+
+
+def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
+ """
+ Args:
+ box_scores (N, 5): boxes in corner-form and probabilities.
+ iou_threshold: intersection over union threshold.
+ top_k: keep top_k results. If k <= 0, keep all the results.
+ candidate_size: only consider the candidates with the highest scores.
+ Returns:
+        box_scores (k, 5): the kept boxes in corner-form with scores
+ """
+ scores = box_scores[:, -1]
+ boxes = box_scores[:, :-1]
+ picked = []
+ indexes = np.argsort(scores)
+ indexes = indexes[-candidate_size:]
+ while len(indexes) > 0:
+ current = indexes[-1]
+ picked.append(current)
+ if 0 < top_k == len(picked) or len(indexes) == 1:
+ break
+ current_box = boxes[current, :]
+ indexes = indexes[:-1]
+ rest_boxes = boxes[indexes, :]
+ iou = iou_of(
+ rest_boxes,
+ np.expand_dims(
+ current_box, axis=0), )
+ indexes = indexes[iou <= iou_threshold]
+
+ return box_scores[picked, :]
+
+
+def iou_of(boxes0, boxes1, eps=1e-5):
+ """Return intersection-over-union (Jaccard index) of boxes.
+ Args:
+ boxes0 (N, 4): ground truth boxes.
+ boxes1 (N or 1, 4): predicted boxes.
+ eps: a small number to avoid 0 as denominator.
+ Returns:
+ iou (N): IoU values.
+ """
+ overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
+ overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
+
+ overlap_area = area_of(overlap_left_top, overlap_right_bottom)
+ area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
+ area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
+ return overlap_area / (area0 + area1 - overlap_area + eps)
+
+
+def area_of(left_top, right_bottom):
+ """Compute the areas of rectangles given two corners.
+ Args:
+ left_top (N, 2): left top corner.
+ right_bottom (N, 2): right bottom corner.
+ Returns:
+ area (N): return the area.
+ """
+ hw = np.clip(right_bottom - left_top, 0.0, None)
+ return hw[..., 0] * hw[..., 1]
+
+
+class PicoDetPostProcess(object):
+ """
+ Args:
+        input_shape (tuple): network input image size, (h, w)
+        ori_shape (np.ndarray): original image shapes before resize/padding
+        scale_factor (np.ndarray): per-image resize scale factors
+        strides (list): FPN strides of the detection heads
+ """
+
+ def __init__(self,
+ input_shape,
+ ori_shape,
+ scale_factor,
+ strides=[8, 16, 32, 64],
+ score_threshold=0.4,
+ nms_threshold=0.5,
+ nms_top_k=1000,
+ keep_top_k=100):
+ self.ori_shape = ori_shape
+ self.input_shape = input_shape
+ self.scale_factor = scale_factor
+ self.strides = strides
+ self.score_threshold = score_threshold
+ self.nms_threshold = nms_threshold
+ self.nms_top_k = nms_top_k
+ self.keep_top_k = keep_top_k
+
+ def warp_boxes(self, boxes, ori_shape):
+ """Apply transform to boxes
+ """
+ width, height = ori_shape[1], ori_shape[0]
+ n = len(boxes)
+ if n:
+ # warp points
+ xy = np.ones((n * 4, 3))
+ xy[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
+ n * 4, 2) # x1y1, x2y2, x1y2, x2y1
+ # xy = xy @ M.T # transform
+ xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
+ # create new boxes
+ x = xy[:, [0, 2, 4, 6]]
+ y = xy[:, [1, 3, 5, 7]]
+ xy = np.concatenate(
+ (x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
+ # clip boxes
+ xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
+ xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
+ return xy.astype(np.float32)
+ else:
+ return boxes
+
+ def __call__(self, scores, raw_boxes):
+ batch_size = raw_boxes[0].shape[0]
+ reg_max = int(raw_boxes[0].shape[-1] / 4 - 1)
+ out_boxes_num = []
+ out_boxes_list = []
+ for batch_id in range(batch_size):
+ # generate centers
+ decode_boxes = []
+ select_scores = []
+ for stride, box_distribute, score in zip(self.strides, raw_boxes,
+ scores):
+ box_distribute = box_distribute[batch_id]
+ score = score[batch_id]
+ # centers
+ fm_h = self.input_shape[0] / stride
+ fm_w = self.input_shape[1] / stride
+ h_range = np.arange(fm_h)
+ w_range = np.arange(fm_w)
+ ww, hh = np.meshgrid(w_range, h_range)
+ ct_row = (hh.flatten() + 0.5) * stride
+ ct_col = (ww.flatten() + 0.5) * stride
+ center = np.stack((ct_col, ct_row, ct_col, ct_row), axis=1)
+
+ # box distribution to distance
+ reg_range = np.arange(reg_max + 1)
+ box_distance = box_distribute.reshape((-1, reg_max + 1))
+ box_distance = softmax(box_distance, axis=1)
+ box_distance = box_distance * np.expand_dims(reg_range, axis=0)
+ box_distance = np.sum(box_distance, axis=1).reshape((-1, 4))
+ box_distance = box_distance * stride
+ if np.mean(box_distance) == 0:
+ continue
+ # top K candidate
+ topk_idx = np.argsort(score.max(axis=1))[::-1]
+ topk_idx = topk_idx[:self.nms_top_k]
+ center = center[topk_idx]
+ score = score[topk_idx]
+ box_distance = box_distance[topk_idx]
+
+ # decode box
+ decode_box = center + [-1, -1, 1, 1] * box_distance
+
+ select_scores.append(score)
+ decode_boxes.append(decode_box)
+
+ # nms
+ if len(decode_boxes) == 0:
+                return [], []
+ bboxes = np.concatenate(decode_boxes, axis=0)
+ confidences = np.concatenate(select_scores, axis=0)
+ picked_box_probs = []
+ picked_labels = []
+ for class_index in range(0, confidences.shape[1]):
+ probs = confidences[:, class_index]
+ mask = probs > self.score_threshold
+ probs = probs[mask]
+ if probs.shape[0] == 0:
+ continue
+ subset_boxes = bboxes[mask, :]
+ box_probs = np.concatenate(
+ [subset_boxes, probs.reshape(-1, 1)], axis=1)
+ box_probs = hard_nms(
+ box_probs,
+ iou_threshold=self.nms_threshold,
+ top_k=self.keep_top_k, )
+ picked_box_probs.append(box_probs)
+ picked_labels.extend([class_index] * box_probs.shape[0])
+
+ if len(picked_box_probs) == 0:
+                out_boxes_list.append(np.empty((0, 6)))  # 6 cols to match the [class, score, box] rows built below
+ out_boxes_num.append(0)
+
+ else:
+ picked_box_probs = np.concatenate(picked_box_probs)
+
+ # resize output boxes
+ picked_box_probs[:, :4] = self.warp_boxes(
+ picked_box_probs[:, :4], self.ori_shape[batch_id])
+ im_scale = np.concatenate([
+ self.scale_factor[batch_id][::-1],
+ self.scale_factor[batch_id][::-1]
+ ])
+ picked_box_probs[:, :4] /= im_scale
+                # class, score, box (x1, y1, x2, y2)
+ out_boxes_list.append(
+ np.concatenate(
+ [
+ np.expand_dims(
+ np.array(picked_labels),
+ axis=-1), np.expand_dims(
+ picked_box_probs[:, 4], axis=-1),
+ picked_box_probs[:, :4]
+ ],
+ axis=1))
+ out_boxes_num.append(len(picked_labels))
+
+ out_boxes_list = np.concatenate(out_boxes_list, axis=0)
+ out_boxes_num = np.asarray(out_boxes_num).astype(np.int32)
+
+ return out_boxes_list, out_boxes_num
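
A hedged usage sketch for the post-processor above, driven with random stand-in tensors shaped like the PicoDet heads; in the served pipeline these arrive as the transpose_*.tmp_0 fetch vars, and the 80 classes and reg_max of 7 are assumptions matching the COCO label list and a 32-channel box head:

import numpy as np

num_classes, reg_max, size, strides = 80, 7, (416, 416), [8, 16, 32, 64]
scores, raw_boxes = [], []
for s in strides:
    n = (size[0] // s) * (size[1] // s)  # anchor points at this stride
    scores.append(np.random.rand(1, n, num_classes).astype(np.float32))
    raw_boxes.append(np.random.rand(1, n, 4 * (reg_max + 1)).astype(np.float32))

post = PicoDetPostProcess(input_shape=size,
                          ori_shape=np.array([[416, 416]]),
                          scale_factor=np.array([[1.0, 1.0]]),
                          strides=strides)
boxes, boxes_num = post(scores, raw_boxes)
print(boxes.shape, boxes_num)  # rows are [class, score, x1, y1, x2, y2]
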
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/pipeline_http_client.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/pipeline_http_client.py
new file mode 100644
index 000000000..f2d165277
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/pipeline_http_client.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# from paddle_serving_server.pipeline import PipelineClient
+import numpy as np
+import requests
+import json
+import cv2
+import base64
+import os
+from time import time  # only time() is used below
+import threading
+
+
+def demo(url, data, i):
+    begin_time = time()
+    r = requests.post(url=url, data=json.dumps(data))
+    end_time = time()
+    run_time = end_time - begin_time
+    print('thread %d took %f s' % (i, run_time))
+    print(r.json())
+
+
+def cv2_to_base64(image):
+ return base64.b64encode(image).decode('utf8')
+
+url = "http://127.0.0.1:2009/ppyolo_mbv3/prediction"
+with open(os.path.join(".", "test.jpg"), 'rb') as file:
+ image_data1 = file.read()
+image = cv2_to_base64(image_data1)
+category_dict={0.0:"person",1.0:"bicycle",2.0:"motorcycle"}
+data = {"key": ["image"], "value": [image]}
+r = requests.post(url=url, data=json.dumps(data))
+print(r.json())
+'''
+results = eval(r.json()['value'][0])
+img = cv2.imread("test.jpg")
+for result in results:
+ if result["score"] > 0.5:
+ left, right, top, bottom= int(result['bbox'][0]), int(result['bbox'][2]), int(result['bbox'][1]), int(result['bbox'][3])
+ cv2.rectangle(img,(left ,top),(right,bottom), (0, 0, 255), 2)
+ cv2.putText(img,str(round(result["score"],2)),(left,top-10), cv2.FONT_HERSHEY_SIMPLEX,1.2,(0,255,0),2)
+ print(category_dict[result["category_id"]])
+ cv2.putText(img,category_dict[result["category_id"]],(left,top+20), cv2.FONT_HERSHEY_SIMPLEX,1.2,(0,255,0),2)
+cv2.imwrite("./result.jpg",img)
+'''
+
+
+
+
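
pipeline_http_client.py above imports threading and defines demo() but never calls either; presumably they were meant for a concurrent latency test against the server. A minimal sketch of that intended usage, with the thread count of 4 as an assumption:

threads = [threading.Thread(target=demo, args=(url, data, i)) for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
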
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/preprocess.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/preprocess.py
new file mode 100644
index 000000000..644c8ce3f
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/preprocess.py
@@ -0,0 +1,395 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cv2
+import numpy as np
+
+
+def decode_image(im_file, im_info):
+ """read rgb image
+ Args:
+ im_file (str|np.ndarray): input can be image path or np.ndarray
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ if isinstance(im_file, str):
+ with open(im_file, 'rb') as f:
+ im_read = f.read()
+ data = np.frombuffer(im_read, dtype='uint8')
+ im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
+ im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+ else:
+ im = im_file
+ im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
+ im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
+ return im, im_info
+
+
+class Resize(object):
+ """resize image by target_size and max_size
+ Args:
+ target_size (int): the target size of image
+ keep_ratio (bool): whether keep_ratio or not, default true
+ interp (int): method of resize
+ """
+
+ def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
+ if isinstance(target_size, int):
+ target_size = [target_size, target_size]
+ self.target_size = target_size
+ self.keep_ratio = keep_ratio
+ self.interp = interp
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ assert len(self.target_size) == 2
+ assert self.target_size[0] > 0 and self.target_size[1] > 0
+ im_channel = im.shape[2]
+ im_scale_y, im_scale_x = self.generate_scale(im)
+ im = cv2.resize(
+ im,
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=self.interp)
+ im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
+ im_info['scale_factor'] = np.array(
+ [im_scale_y, im_scale_x]).astype('float32')
+ return im, im_info
+
+ def generate_scale(self, im):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ Returns:
+ im_scale_x: the resize ratio of X
+ im_scale_y: the resize ratio of Y
+ """
+ origin_shape = im.shape[:2]
+ im_c = im.shape[2]
+ if self.keep_ratio:
+ im_size_min = np.min(origin_shape)
+ im_size_max = np.max(origin_shape)
+ target_size_min = np.min(self.target_size)
+ target_size_max = np.max(self.target_size)
+ im_scale = float(target_size_min) / float(im_size_min)
+ if np.round(im_scale * im_size_max) > target_size_max:
+ im_scale = float(target_size_max) / float(im_size_max)
+ im_scale_x = im_scale
+ im_scale_y = im_scale
+ else:
+ resize_h, resize_w = self.target_size
+ im_scale_y = resize_h / float(origin_shape[0])
+ im_scale_x = resize_w / float(origin_shape[1])
+ return im_scale_y, im_scale_x
+
+
+class NormalizeImage(object):
+ """normalize image
+ Args:
+ mean (list): im - mean
+ std (list): im / std
+ is_scale (bool): whether need im / 255
+ is_channel_first (bool): if True: image shape is CHW, else: HWC
+ """
+
+ def __init__(self, mean, std, is_scale=True):
+ self.mean = mean
+ self.std = std
+ self.is_scale = is_scale
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ im = im.astype(np.float32, copy=False)
+ mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
+ std = np.array(self.std)[np.newaxis, np.newaxis, :]
+
+ if self.is_scale:
+ im = im / 255.0
+ im -= mean
+ im /= std
+ return im, im_info
+
+
+class Permute(object):
+ """permute image
+ Args:
+ to_bgr (bool): whether convert RGB to BGR
+ channel_first (bool): whether convert HWC to CHW
+ """
+
+ def __init__(self, ):
+ super(Permute, self).__init__()
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ im = im.transpose((2, 0, 1)).copy()
+ return im, im_info
+
+
+class PadStride(object):
+ """ padding image for model with FPN, instead PadBatch(pad_to_stride) in original config
+ Args:
+ stride (bool): model with FPN need image shape % stride == 0
+ """
+
+ def __init__(self, stride=0):
+ self.coarsest_stride = stride
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ coarsest_stride = self.coarsest_stride
+ if coarsest_stride <= 0:
+ return im, im_info
+ im_c, im_h, im_w = im.shape
+ pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
+ pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
+ padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
+ padding_im[:, :im_h, :im_w] = im
+ return padding_im, im_info
+
+
+class LetterBoxResize(object):
+ def __init__(self, target_size):
+ """
+ Resize image to target size, convert normalized xywh to pixel xyxy
+ format ([x_center, y_center, width, height] -> [x0, y0, x1, y1]).
+ Args:
+ target_size (int|list): image target size.
+ """
+ super(LetterBoxResize, self).__init__()
+ if isinstance(target_size, int):
+ target_size = [target_size, target_size]
+ self.target_size = target_size
+
+ def letterbox(self, img, height, width, color=(127.5, 127.5, 127.5)):
+ # letterbox: resize a rectangular image to a padded rectangular
+ shape = img.shape[:2] # [height, width]
+ ratio_h = float(height) / shape[0]
+ ratio_w = float(width) / shape[1]
+ ratio = min(ratio_h, ratio_w)
+ new_shape = (round(shape[1] * ratio),
+ round(shape[0] * ratio)) # [width, height]
+ padw = (width - new_shape[0]) / 2
+ padh = (height - new_shape[1]) / 2
+ top, bottom = round(padh - 0.1), round(padh + 0.1)
+ left, right = round(padw - 0.1), round(padw + 0.1)
+
+ img = cv2.resize(
+ img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
+ img = cv2.copyMakeBorder(
+ img, top, bottom, left, right, cv2.BORDER_CONSTANT,
+ value=color) # padded rectangular
+ return img, ratio, padw, padh
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ assert len(self.target_size) == 2
+ assert self.target_size[0] > 0 and self.target_size[1] > 0
+ height, width = self.target_size
+ h, w = im.shape[:2]
+ im, ratio, padw, padh = self.letterbox(im, height=height, width=width)
+
+ new_shape = [round(h * ratio), round(w * ratio)]
+ im_info['im_shape'] = np.array(new_shape, dtype=np.float32)
+ im_info['scale_factor'] = np.array([ratio, ratio], dtype=np.float32)
+ return im, im_info
+
+
+class WarpAffine(object):
+ """Warp affine the image
+ """
+
+ def __init__(self,
+ keep_res=False,
+ pad=31,
+ input_h=512,
+ input_w=512,
+ scale=0.4,
+ shift=0.1):
+ self.keep_res = keep_res
+ self.pad = pad
+ self.input_h = input_h
+ self.input_w = input_w
+ self.scale = scale
+ self.shift = shift
+
+ def _get_3rd_point(self, a, b):
+ assert len(
+ a) == 2, 'input of _get_3rd_point should be point with length of 2'
+ assert len(
+ b) == 2, 'input of _get_3rd_point should be point with length of 2'
+ direction = a - b
+ third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)
+ return third_pt
+
+ def rotate_point(self, pt, angle_rad):
+ """Rotate a point by an angle.
+
+ Args:
+ pt (list[float]): 2 dimensional point to be rotated
+ angle_rad (float): rotation angle by radian
+
+ Returns:
+ list[float]: Rotated point.
+ """
+ assert len(pt) == 2
+ sn, cs = np.sin(angle_rad), np.cos(angle_rad)
+ new_x = pt[0] * cs - pt[1] * sn
+ new_y = pt[0] * sn + pt[1] * cs
+ rotated_pt = [new_x, new_y]
+
+ return rotated_pt
+
+ def get_affine_transform(self,
+ center,
+ input_size,
+ rot,
+ output_size,
+ shift=(0., 0.),
+ inv=False):
+ """Get the affine transform matrix, given the center/scale/rot/output_size.
+
+ Args:
+ center (np.ndarray[2, ]): Center of the bounding box (x, y).
+ input_size (np.ndarray[2, ]): Size of input feature (width, height).
+ rot (float): Rotation angle (degree).
+ output_size (np.ndarray[2, ]): Size of the destination heatmaps.
+ shift (0-100%): Shift translation ratio wrt the width/height.
+ Default (0., 0.).
+ inv (bool): Option to inverse the affine transform direction.
+ (inv=False: src->dst or inv=True: dst->src)
+
+ Returns:
+ np.ndarray: The transform matrix.
+ """
+ assert len(center) == 2
+ assert len(output_size) == 2
+ assert len(shift) == 2
+
+ if not isinstance(input_size, (np.ndarray, list)):
+ input_size = np.array([input_size, input_size], dtype=np.float32)
+ scale_tmp = input_size
+
+ shift = np.array(shift)
+ src_w = scale_tmp[0]
+ dst_w = output_size[0]
+ dst_h = output_size[1]
+
+ rot_rad = np.pi * rot / 180
+ src_dir = self.rotate_point([0., src_w * -0.5], rot_rad)
+ dst_dir = np.array([0., dst_w * -0.5])
+
+ src = np.zeros((3, 2), dtype=np.float32)
+
+ src[0, :] = center + scale_tmp * shift
+ src[1, :] = center + src_dir + scale_tmp * shift
+ src[2, :] = self._get_3rd_point(src[0, :], src[1, :])
+
+ dst = np.zeros((3, 2), dtype=np.float32)
+ dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
+ dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
+ dst[2, :] = self._get_3rd_point(dst[0, :], dst[1, :])
+
+ if inv:
+ trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
+ else:
+ trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
+
+ return trans
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
+
+ h, w = img.shape[:2]
+
+ if self.keep_res:
+ input_h = (h | self.pad) + 1
+ input_w = (w | self.pad) + 1
+ s = np.array([input_w, input_h], dtype=np.float32)
+ c = np.array([w // 2, h // 2], dtype=np.float32)
+
+ else:
+ s = max(h, w) * 1.0
+ input_h, input_w = self.input_h, self.input_w
+ c = np.array([w / 2., h / 2.], dtype=np.float32)
+
+ trans_input = self.get_affine_transform(c, s, 0, [input_w, input_h])
+ img = cv2.resize(img, (w, h))
+ inp = cv2.warpAffine(
+ img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
+ return inp, im_info
+
+
+def preprocess(im, preprocess_ops):
+    # run each preprocess op in order on the decoded image
+    im_info = {
+        'scale_factor': np.array(
+            [1., 1.], dtype=np.float32),
+        'im_shape': None,
+    }
+    im, im_info = decode_image(im, im_info)
+    for operator in preprocess_ops:
+        im, im_info = operator(im, im_info)
+    return im, im_info
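For reference, a minimal sketch of how the ops above compose; the 416x416 target size and the ImageNet mean/std are assumptions here, not values read from the exported `infer_cfg.yml`:

```python
# Hypothetical usage of the preprocessing pipeline defined above.
# Target size and mean/std are assumptions; "test.jpg" must exist locally.
ops = [
    Resize(target_size=416, keep_ratio=False),
    NormalizeImage(mean=[0.485, 0.456, 0.406],
                   std=[0.229, 0.224, 0.225], is_scale=True),
    Permute(),
]
im, im_info = preprocess("test.jpg", ops)
print(im.shape, im_info['scale_factor'])  # CHW image plus resize metadata
```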
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/result.jpg b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/result.jpg
new file mode 100644
index 000000000..b20b3f2f6
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/result.jpg differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_client/serving_client_conf.prototxt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_client/serving_client_conf.prototxt
new file mode 100644
index 000000000..acb7ecc7d
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_client/serving_client_conf.prototxt
@@ -0,0 +1,66 @@
+feed_var {
+ name: "image"
+ alias_name: "image"
+ is_lod_tensor: false
+ feed_type: 1
+ shape: 1
+ shape: 3
+ shape: 640
+ shape: 640
+}
+fetch_var {
+ name: "transpose_0.tmp_0"
+ alias_name: "transpose_0.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_1.tmp_0"
+ alias_name: "transpose_1.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_2.tmp_0"
+ alias_name: "transpose_2.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_3.tmp_0"
+ alias_name: "transpose_3.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_4.tmp_0"
+ alias_name: "transpose_4.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_5.tmp_0"
+ alias_name: "transpose_5.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_6.tmp_0"
+ alias_name: "transpose_6.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_7.tmp_0"
+ alias_name: "transpose_7.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
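This client conf declares one float `image` feed of shape 1x3x640x640 and eight `transpose_*.tmp_0` fetch outputs. A hedged sketch of consuming it with the bare RPC client follows; the port and the zero-filled input are assumptions, and the sample itself talks to the pipeline server over HTTP instead:

```python
# Hypothetical direct-RPC usage of the client conf above; port 9393 and the
# dummy input are assumptions.
from paddle_serving_client import Client
import numpy as np

client = Client()
client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9393"])
fake_image = np.zeros((1, 3, 640, 640), dtype=np.float32)  # NCHW float input
fetch_vars = ["transpose_%d.tmp_0" % i for i in range(8)]
result = client.predict(feed={"image": fake_image}, fetch=fetch_vars, batch=True)
```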
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_client/serving_client_conf.stream.prototxt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_client/serving_client_conf.stream.prototxt
new file mode 100644
index 000000000..d0b1bbc31
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_client/serving_client_conf.stream.prototxt differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/__model__ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/__model__
new file mode 100644
index 000000000..3008d8b97
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/__model__ differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/__params__ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/__params__
new file mode 100644
index 000000000..b7edffd8a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/__params__ differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/model.pdmodel b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/model.pdmodel
new file mode 100644
index 000000000..e69de29bb
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/serving_server_conf.prototxt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/serving_server_conf.prototxt
new file mode 100644
index 000000000..acb7ecc7d
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/serving_server_conf.prototxt
@@ -0,0 +1,66 @@
+feed_var {
+ name: "image"
+ alias_name: "image"
+ is_lod_tensor: false
+ feed_type: 1
+ shape: 1
+ shape: 3
+ shape: 640
+ shape: 640
+}
+fetch_var {
+ name: "transpose_0.tmp_0"
+ alias_name: "transpose_0.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_1.tmp_0"
+ alias_name: "transpose_1.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_2.tmp_0"
+ alias_name: "transpose_2.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_3.tmp_0"
+ alias_name: "transpose_3.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_4.tmp_0"
+ alias_name: "transpose_4.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_5.tmp_0"
+ alias_name: "transpose_5.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_6.tmp_0"
+ alias_name: "transpose_6.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
+fetch_var {
+ name: "transpose_7.tmp_0"
+ alias_name: "transpose_7.tmp_0"
+ is_lod_tensor: true
+ fetch_type: 1
+ shape: -1
+}
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/serving_server_conf.stream.prototxt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/serving_server_conf.stream.prototxt
new file mode 100644
index 000000000..d0b1bbc31
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/serving_server/serving_server_conf.stream.prototxt differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/test.jpg b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/test.jpg
new file mode 100644
index 000000000..4f18d55c4
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/test.jpg differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/web_service.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/web_service.py
new file mode 100644
index 000000000..8ed6a6086
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/picodet_lcnet_1_5x_416_coco/web_service.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from paddle_serving_server.web_service import WebService, Op
+import logging
+import numpy as np
+import sys
+import cv2
+from paddle_serving_app.reader import *
+import base64
+import os
+import yaml
+import glob
+from picodet_postprocess import PicoDetPostProcess
+from preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride, LetterBoxResize, WarpAffine
+
+class PPYoloMbvOp(Op):
+    def init_op(self):
+        self.feed_dict = {}
+        # build the preprocessing pipeline from the exported inference config
+        deploy_file = 'infer_cfg.yml'
+        with open(deploy_file) as f:
+            yml_conf = yaml.safe_load(f)
+        preprocess_infos = yml_conf['Preprocess']
+        self.preprocess_ops = []
+        for op_info in preprocess_infos:
+            new_op_info = op_info.copy()
+            op_type = new_op_info.pop('type')
+            self.preprocess_ops.append(eval(op_type)(**new_op_info))
+
+    def preprocess(self, input_dicts, data_id, log_id):
+        (_, input_dict), = input_dicts.items()
+        imgs = []
+        for key in input_dict.keys():
+            data = base64.b64decode(input_dict[key].encode('utf8'))
+            data = np.frombuffer(data, np.uint8)
+            im = cv2.imdecode(data, 1)
+            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+            im_info = {
+                'im_shape': np.array(im.shape[:2], dtype=np.float32),
+                'scale_factor': np.array([1., 1.], dtype=np.float32),
+            }
+            for operator in self.preprocess_ops:
+                im, im_info = operator(im, im_info)
+            imgs.append({
+                "image": im[np.newaxis, :],
+                "im_shape": [im_info['im_shape']],
+                "scale_factor": [im_info['scale_factor']],
+            })
+        self.feed_dict = {
+            "image": np.concatenate([x["image"] for x in imgs], axis=0),
+            "im_shape": np.concatenate([x["im_shape"] for x in imgs], axis=0),
+            "scale_factor": np.concatenate(
+                [x["scale_factor"] for x in imgs], axis=0)
+        }
+        return self.feed_dict, False, None, ""
+
+    def postprocess(self, input_dicts, fetch_dict, log_id, data_id=0):
+        np_score_list = []
+        np_boxes_list = []
+        # the first four fetch vars are score maps, the remaining four are box maps
+        for i, value in enumerate(fetch_dict.values()):
+            if i < 4:
+                np_score_list.append(value)
+            else:
+                np_boxes_list.append(value)
+
+        post_process = PicoDetPostProcess(
+            (640, 640),
+            self.feed_dict['im_shape'],
+            self.feed_dict['scale_factor'],
+            [8, 16, 32, 64],
+            0.5)
+        res_dict = {}
+        np_boxes, np_boxes_num = post_process(np_score_list, np_boxes_list)
+        if len(np_boxes) == 0:
+            return res_dict, None, ""
+
+        d = []
+        for b in range(np_boxes.shape[0]):
+            c = {}
+            c["category_id"] = np_boxes[b][0]
+            c["bbox"] = [np_boxes[b][2], np_boxes[b][3],
+                         np_boxes[b][4], np_boxes[b][5]]
+            c["score"] = np_boxes[b][1]
+            d.append(c)
+        res_dict["bbox_result"] = str(d)
+        return res_dict, None, ""
+
+
+class PPYoloMbv(WebService):
+ def get_pipeline_response(self, read_op):
+ ppyolo_mbv3_op = PPYoloMbvOp(name="ppyolo_mbv3", input_ops=[read_op])
+ return ppyolo_mbv3_op
+
+
+ppyolo_mbv3_service = PPYoloMbv(name="ppyolo_mbv3")
+ppyolo_mbv3_service.prepare_pipeline_config("config.yml")
+ppyolo_mbv3_service.run_service()
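`prepare_pipeline_config("config.yml")` reads a pipeline config that is not part of this diff. A minimal hypothetical sketch follows; every value is an assumption except `http_port`, which matches the URL used by the client script:

```yaml
# Hypothetical config.yml for the PPYoloMbv service above; all values except
# http_port (which matches the client URL) are assumptions.
worker_num: 1
http_port: 2009
op:
    ppyolo_mbv3:
        concurrency: 1
        local_service_conf:
            model_config: serving_server
            device_type: 1      # 0: CPU, 1: GPU
            devices: "0"
```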
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/__init__.py
new file mode 100644
index 000000000..6fcc982fb
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/__init__.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import (core, data, engine, modeling, model_zoo, optimizer, metrics,
+ utils, slim)
+
+
+try:
+ from .version import full_version as __version__
+ from .version import commit as __git_commit__
+except ImportError:
+ import sys
+ sys.stderr.write("Warning: import ppdet from source directory " \
+ "without installing, run 'python setup.py install' to " \
+ "install ppdet firstly\n")
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..2798aa67c
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/__pycache__/optimizer.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/__pycache__/optimizer.cpython-37.pyc
new file mode 100644
index 000000000..f313e3454
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/__pycache__/optimizer.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/__init__.py
new file mode 100644
index 000000000..d04277177
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import config
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..e9d9642f5
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/__pycache__/workspace.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/__pycache__/workspace.cpython-37.pyc
new file mode 100644
index 000000000..9b731d98d
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/__pycache__/workspace.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__init__.py
new file mode 100644
index 000000000..d0c32e260
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..c6c49d37f
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__pycache__/schema.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__pycache__/schema.cpython-37.pyc
new file mode 100644
index 000000000..b29e3e2bb
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__pycache__/schema.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__pycache__/yaml_helpers.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__pycache__/yaml_helpers.cpython-37.pyc
new file mode 100644
index 000000000..d92cfca35
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/__pycache__/yaml_helpers.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/schema.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/schema.py
new file mode 100644
index 000000000..2e41b5c34
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/schema.py
@@ -0,0 +1,248 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+import inspect
+import importlib
+import re
+
+try:
+ from docstring_parser import parse as doc_parse
+except Exception:
+
+ def doc_parse(*args):
+ pass
+
+
+try:
+ from typeguard import check_type
+except Exception:
+
+ def check_type(*args):
+ pass
+
+
+__all__ = ['SchemaValue', 'SchemaDict', 'SharedConfig', 'extract_schema']
+
+
+class SchemaValue(object):
+ def __init__(self, name, doc='', type=None):
+ super(SchemaValue, self).__init__()
+ self.name = name
+ self.doc = doc
+ self.type = type
+
+ def set_default(self, value):
+ self.default = value
+
+ def has_default(self):
+ return hasattr(self, 'default')
+
+
+class SchemaDict(dict):
+ def __init__(self, **kwargs):
+ super(SchemaDict, self).__init__()
+ self.schema = {}
+ self.strict = False
+ self.doc = ""
+ self.update(kwargs)
+
+ def __setitem__(self, key, value):
+ # XXX also update regular dict to SchemaDict??
+ if isinstance(value, dict) and key in self and isinstance(self[key],
+ SchemaDict):
+ self[key].update(value)
+ else:
+ super(SchemaDict, self).__setitem__(key, value)
+
+ def __missing__(self, key):
+ if self.has_default(key):
+ return self.schema[key].default
+ elif key in self.schema:
+ return self.schema[key]
+ else:
+ raise KeyError(key)
+
+ def copy(self):
+ newone = SchemaDict()
+ newone.__dict__.update(self.__dict__)
+ newone.update(self)
+ return newone
+
+ def set_schema(self, key, value):
+ assert isinstance(value, SchemaValue)
+ self.schema[key] = value
+
+ def set_strict(self, strict):
+ self.strict = strict
+
+ def has_default(self, key):
+ return key in self.schema and self.schema[key].has_default()
+
+ def is_default(self, key):
+ if not self.has_default(key):
+ return False
+ if hasattr(self[key], '__dict__'):
+ return True
+ else:
+ return key not in self or self[key] == self.schema[key].default
+
+ def find_default_keys(self):
+ return [
+ k for k in list(self.keys()) + list(self.schema.keys())
+ if self.is_default(k)
+ ]
+
+ def mandatory(self):
+ return any([k for k in self.schema.keys() if not self.has_default(k)])
+
+ def find_missing_keys(self):
+ missing = [
+ k for k in self.schema.keys()
+ if k not in self and not self.has_default(k)
+ ]
+        placeholders = [k for k in self if self[k] in ('<missing>', '<value>')]
+ return missing + placeholders
+
+ def find_extra_keys(self):
+ return list(set(self.keys()) - set(self.schema.keys()))
+
+ def find_mismatch_keys(self):
+ mismatch_keys = []
+ for arg in self.schema.values():
+ if arg.type is not None:
+ try:
+ check_type("{}.{}".format(self.name, arg.name),
+ self[arg.name], arg.type)
+ except Exception:
+ mismatch_keys.append(arg.name)
+ return mismatch_keys
+
+ def validate(self):
+ missing_keys = self.find_missing_keys()
+ if missing_keys:
+ raise ValueError("Missing param for class<{}>: {}".format(
+ self.name, ", ".join(missing_keys)))
+ extra_keys = self.find_extra_keys()
+ if extra_keys and self.strict:
+ raise ValueError("Extraneous param for class<{}>: {}".format(
+ self.name, ", ".join(extra_keys)))
+ mismatch_keys = self.find_mismatch_keys()
+ if mismatch_keys:
+ raise TypeError("Wrong param type for class<{}>: {}".format(
+ self.name, ", ".join(mismatch_keys)))
+
+
+class SharedConfig(object):
+ """
+ Representation class for `__shared__` annotations, which work as follows:
+
+ - if `key` is set for the module in config file, its value will take
+ precedence
+ - if `key` is not set for the module but present in the config file, its
+ value will be used
+ - otherwise, use the provided `default_value` as fallback
+
+ Args:
+ key: config[key] will be injected
+ default_value: fallback value
+ """
+
+ def __init__(self, key, default_value=None):
+ super(SharedConfig, self).__init__()
+ self.key = key
+ self.default_value = default_value
+
+
+def extract_schema(cls):
+ """
+ Extract schema from a given class
+
+ Args:
+ cls (type): Class from which to extract.
+
+ Returns:
+ schema (SchemaDict): Extracted schema.
+ """
+ ctor = cls.__init__
+ # python 2 compatibility
+ if hasattr(inspect, 'getfullargspec'):
+ argspec = inspect.getfullargspec(ctor)
+ annotations = argspec.annotations
+ has_kwargs = argspec.varkw is not None
+    else:
+        argspec = inspect.getargspec(ctor)
+        # python 2 type hinting workaround, see pep-3107
+        # however, since `typeguard` does not support python 2, type checking
+        # is still python 3 only for now
+        annotations = getattr(ctor, '__annotations__', {})
+        has_kwargs = argspec.keywords is not None
+
+ names = [arg for arg in argspec.args if arg != 'self']
+ defaults = argspec.defaults
+ num_defaults = argspec.defaults is not None and len(argspec.defaults) or 0
+ num_required = len(names) - num_defaults
+
+ docs = cls.__doc__
+ if docs is None and getattr(cls, '__category__', None) == 'op':
+ docs = cls.__call__.__doc__
+ try:
+ docstring = doc_parse(docs)
+ except Exception:
+ docstring = None
+
+ if docstring is None:
+ comments = {}
+ else:
+ comments = {}
+ for p in docstring.params:
+ match_obj = re.match('^([a-zA-Z_]+[a-zA-Z_0-9]*).*', p.arg_name)
+ if match_obj is not None:
+ comments[match_obj.group(1)] = p.description
+
+ schema = SchemaDict()
+ schema.name = cls.__name__
+ schema.doc = ""
+ if docs is not None:
+ start_pos = docs[0] == '\n' and 1 or 0
+ schema.doc = docs[start_pos:].split("\n")[0].strip()
+ # XXX handle paddle's weird doc convention
+ if '**' == schema.doc[:2] and '**' == schema.doc[-2:]:
+ schema.doc = schema.doc[2:-2].strip()
+ schema.category = hasattr(cls, '__category__') and getattr(
+ cls, '__category__') or 'module'
+ schema.strict = not has_kwargs
+ schema.pymodule = importlib.import_module(cls.__module__)
+ schema.inject = getattr(cls, '__inject__', [])
+ schema.shared = getattr(cls, '__shared__', [])
+ for idx, name in enumerate(names):
+ comment = name in comments and comments[name] or name
+ if name in schema.inject:
+ type_ = None
+ else:
+ type_ = name in annotations and annotations[name] or None
+ value_schema = SchemaValue(name, comment, type_)
+ if name in schema.shared:
+ assert idx >= num_required, "shared config must have default value"
+ default = defaults[idx - num_required]
+ value_schema.set_default(SharedConfig(name, default))
+ elif idx >= num_required:
+ default = defaults[idx - num_required]
+ value_schema.set_default(default)
+ schema.set_schema(name, value_schema)
+
+ return schema
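To illustrate, a small sketch of `extract_schema` on an invented class (the class, its docstring, and the import are not part of ppdet itself):

```python
# Hypothetical illustration of extract_schema on a toy class.
from ppdet.core.config.schema import extract_schema


class Dummy(object):
    """A toy module.

    Args:
        depth (int): number of layers
        alpha (float): scaling factor
    """

    def __init__(self, depth, alpha=0.5):
        self.depth = depth
        self.alpha = alpha


schema = extract_schema(Dummy)
print(schema.name)                  # Dummy
print(schema.find_missing_keys())   # ['depth'] until a value is supplied
print(schema.has_default('alpha'))  # True
```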
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/yaml_helpers.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/yaml_helpers.py
new file mode 100644
index 000000000..181cfe6fc
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/config/yaml_helpers.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+import inspect
+
+import yaml
+from .schema import SharedConfig
+
+__all__ = ['serializable', 'Callable']
+
+
+def represent_dictionary_order(self, dict_data):
+ return self.represent_mapping('tag:yaml.org,2002:map', dict_data.items())
+
+
+def setup_orderdict():
+ from collections import OrderedDict
+ yaml.add_representer(OrderedDict, represent_dictionary_order)
+
+
+def _make_python_constructor(cls):
+ def python_constructor(loader, node):
+ if isinstance(node, yaml.SequenceNode):
+ args = loader.construct_sequence(node, deep=True)
+ return cls(*args)
+ else:
+ kwargs = loader.construct_mapping(node, deep=True)
+ try:
+ return cls(**kwargs)
+ except Exception as ex:
+ print("Error when construct {} instance from yaml config".
+ format(cls.__name__))
+ raise ex
+
+ return python_constructor
+
+
+def _make_python_representer(cls):
+ # python 2 compatibility
+    if hasattr(inspect, 'getfullargspec'):
+        argspec = inspect.getfullargspec(cls)
+    else:
+        argspec = inspect.getargspec(cls.__init__)
+ argnames = [arg for arg in argspec.args if arg != 'self']
+
+ def python_representer(dumper, obj):
+ if argnames:
+ data = {name: getattr(obj, name) for name in argnames}
+ else:
+ data = obj.__dict__
+ if '_id' in data:
+ del data['_id']
+ return dumper.represent_mapping(u'!{}'.format(cls.__name__), data)
+
+ return python_representer
+
+
+def serializable(cls):
+ """
+ Add loader and dumper for given class, which must be
+ "trivially serializable"
+
+ Args:
+ cls: class to be serialized
+
+ Returns: cls
+ """
+ yaml.add_constructor(u'!{}'.format(cls.__name__),
+ _make_python_constructor(cls))
+ yaml.add_representer(cls, _make_python_representer(cls))
+ return cls
+
+
+yaml.add_representer(SharedConfig,
+ lambda d, o: d.represent_data(o.default_value))
+
+
+@serializable
+class Callable(object):
+ """
+ Helper to be used in Yaml for creating arbitrary class objects
+
+ Args:
+ full_type (str): the full module path to target function
+ """
+
+ def __init__(self, full_type, args=[], kwargs={}):
+ super(Callable, self).__init__()
+ self.full_type = full_type
+ self.args = args
+ self.kwargs = kwargs
+
+ def __call__(self):
+ if '.' in self.full_type:
+ idx = self.full_type.rfind('.')
+ module = importlib.import_module(self.full_type[:idx])
+ func_name = self.full_type[idx + 1:]
+ else:
+ try:
+ module = importlib.import_module('builtins')
+ except Exception:
+ module = importlib.import_module('__builtin__')
+ func_name = self.full_type
+
+ func = getattr(module, func_name)
+ return func(*self.args, **self.kwargs)
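A short sketch of the `@serializable` round-trip on an invented `Point` class (not part of ppdet):

```python
# Hypothetical YAML round-trip using the serializable decorator above.
import yaml
from ppdet.core.config.yaml_helpers import serializable


@serializable
class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y


p = yaml.load("!Point {x: 1, y: 2}", Loader=yaml.Loader)
print(p.x, p.y)      # 1 2
print(yaml.dump(p))  # !Point {x: 1, y: 2} (exact formatting may vary)
```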
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/workspace.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/workspace.py
new file mode 100644
index 000000000..e633746ed
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/core/workspace.py
@@ -0,0 +1,275 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+import importlib
+import os
+import sys
+
+import yaml
+import collections
+
+try:
+ collectionsAbc = collections.abc
+except AttributeError:
+ collectionsAbc = collections
+
+from .config.schema import SchemaDict, SharedConfig, extract_schema
+from .config.yaml_helpers import serializable
+
+__all__ = [
+ 'global_config',
+ 'load_config',
+ 'merge_config',
+ 'get_registered_modules',
+ 'create',
+ 'register',
+ 'serializable',
+ 'dump_value',
+]
+
+
+def dump_value(value):
+ # XXX this is hackish, but collections.abc is not available in python 2
+ if hasattr(value, '__dict__') or isinstance(value, (dict, tuple, list)):
+ value = yaml.dump(value, default_flow_style=True)
+ value = value.replace('\n', '')
+ value = value.replace('...', '')
+ return "'{}'".format(value)
+ else:
+ # primitive types
+ return str(value)
+
+
+class AttrDict(dict):
+ """Single level attribute dict, NOT recursive"""
+
+ def __init__(self, **kwargs):
+ super(AttrDict, self).__init__()
+ super(AttrDict, self).update(kwargs)
+
+ def __getattr__(self, key):
+ if key in self:
+ return self[key]
+ raise AttributeError("object has no attribute '{}'".format(key))
+
+
+global_config = AttrDict()
+
+BASE_KEY = '_BASE_'
+
+
+# parse and load _BASE_ recursively
+def _load_config_with_base(file_path):
+ with open(file_path) as f:
+ file_cfg = yaml.load(f, Loader=yaml.Loader)
+
+ # NOTE: cfgs outside have higher priority than cfgs in _BASE_
+ if BASE_KEY in file_cfg:
+ all_base_cfg = AttrDict()
+ base_ymls = list(file_cfg[BASE_KEY])
+ for base_yml in base_ymls:
+ if base_yml.startswith("~"):
+ base_yml = os.path.expanduser(base_yml)
+ if not base_yml.startswith('/'):
+ base_yml = os.path.join(os.path.dirname(file_path), base_yml)
+
+ with open(base_yml) as f:
+ base_cfg = _load_config_with_base(base_yml)
+ all_base_cfg = merge_config(base_cfg, all_base_cfg)
+
+ del file_cfg[BASE_KEY]
+ return merge_config(file_cfg, all_base_cfg)
+
+ return file_cfg
+
+
+def load_config(file_path):
+ """
+ Load config from file.
+
+ Args:
+ file_path (str): Path of the config file to be loaded.
+
+ Returns: global config
+ """
+ _, ext = os.path.splitext(file_path)
+ assert ext in ['.yml', '.yaml'], "only support yaml files for now"
+
+ # load config from file and merge into global config
+ cfg = _load_config_with_base(file_path)
+ cfg['filename'] = os.path.splitext(os.path.split(file_path)[-1])[0]
+ merge_config(cfg)
+
+ return global_config
+
+
+def dict_merge(dct, merge_dct):
+ """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
+ updating only top-level keys, dict_merge recurses down into dicts nested
+ to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
+ ``dct``.
+
+ Args:
+ dct: dict onto which the merge is executed
+ merge_dct: dct merged into dct
+
+ Returns: dct
+ """
+ for k, v in merge_dct.items():
+ if (k in dct and isinstance(dct[k], dict) and
+ isinstance(merge_dct[k], collectionsAbc.Mapping)):
+ dict_merge(dct[k], merge_dct[k])
+ else:
+ dct[k] = merge_dct[k]
+ return dct
+
+
+def merge_config(config, another_cfg=None):
+ """
+ Merge config into global config or another_cfg.
+
+ Args:
+ config (dict): Config to be merged.
+
+ Returns: global config
+ """
+ global global_config
+ dct = another_cfg or global_config
+ return dict_merge(dct, config)
+
+
+def get_registered_modules():
+ return {k: v for k, v in global_config.items() if isinstance(v, SchemaDict)}
+
+
+def make_partial(cls):
+ op_module = importlib.import_module(cls.__op__.__module__)
+ op = getattr(op_module, cls.__op__.__name__)
+ cls.__category__ = getattr(cls, '__category__', None) or 'op'
+
+ def partial_apply(self, *args, **kwargs):
+ kwargs_ = self.__dict__.copy()
+ kwargs_.update(kwargs)
+ return op(*args, **kwargs_)
+
+ if getattr(cls, '__append_doc__', True): # XXX should default to True?
+ if sys.version_info[0] > 2:
+ cls.__doc__ = "Wrapper for `{}` OP".format(op.__name__)
+ cls.__init__.__doc__ = op.__doc__
+ cls.__call__ = partial_apply
+ cls.__call__.__doc__ = op.__doc__
+ else:
+ # XXX work around for python 2
+ partial_apply.__doc__ = op.__doc__
+ cls.__call__ = partial_apply
+ return cls
+
+
+def register(cls):
+ """
+ Register a given module class.
+
+ Args:
+ cls (type): Module class to be registered.
+
+ Returns: cls
+ """
+ if cls.__name__ in global_config:
+ raise ValueError("Module class already registered: {}".format(
+ cls.__name__))
+ if hasattr(cls, '__op__'):
+ cls = make_partial(cls)
+ global_config[cls.__name__] = extract_schema(cls)
+ return cls
+
+
+def create(cls_or_name, **kwargs):
+ """
+ Create an instance of given module class.
+
+ Args:
+ cls_or_name (type or str): Class of which to create instance.
+
+ Returns: instance of type `cls_or_name`
+ """
+ assert type(cls_or_name) in [type, str
+ ], "should be a class or name of a class"
+ name = type(cls_or_name) == str and cls_or_name or cls_or_name.__name__
+ assert name in global_config and \
+ isinstance(global_config[name], SchemaDict), \
+ "the module {} is not registered".format(name)
+ config = global_config[name]
+ cls = getattr(config.pymodule, name)
+ cls_kwargs = {}
+ cls_kwargs.update(global_config[name])
+
+    # parse `shared` annotation of registered modules
+ if getattr(config, 'shared', None):
+ for k in config.shared:
+ target_key = config[k]
+ shared_conf = config.schema[k].default
+ assert isinstance(shared_conf, SharedConfig)
+ if target_key is not None and not isinstance(target_key,
+ SharedConfig):
+ continue # value is given for the module
+ elif shared_conf.key in global_config:
+ # `key` is present in config
+ cls_kwargs[k] = global_config[shared_conf.key]
+ else:
+ cls_kwargs[k] = shared_conf.default_value
+
+    # parse `inject` annotation of registered modules
+ if getattr(cls, 'from_config', None):
+ cls_kwargs.update(cls.from_config(config, **kwargs))
+
+ if getattr(config, 'inject', None):
+ for k in config.inject:
+ target_key = config[k]
+ # optional dependency
+ if target_key is None:
+ continue
+
+ if isinstance(target_key, dict) or hasattr(target_key, '__dict__'):
+ if 'name' not in target_key.keys():
+ continue
+ inject_name = str(target_key['name'])
+ if inject_name not in global_config:
+                    raise ValueError(
+                        "Missing injection name {}; check its name in the cfg file".
+                        format(k))
+ target = global_config[inject_name]
+ for i, v in target_key.items():
+ if i == 'name':
+ continue
+ target[i] = v
+ if isinstance(target, SchemaDict):
+ cls_kwargs[k] = create(inject_name)
+ elif isinstance(target_key, str):
+ if target_key not in global_config:
+ raise ValueError("Missing injection config:", target_key)
+ target = global_config[target_key]
+ if isinstance(target, SchemaDict):
+ cls_kwargs[k] = create(target_key)
+ elif hasattr(target, '__dict__'): # serialized object
+ cls_kwargs[k] = target
+ else:
+ raise ValueError("Unsupported injection type:", target_key)
+ # prevent modification of global config values of reference types
+ # (e.g., list, dict) from within the created module instances
+ #kwargs = copy.deepcopy(kwargs)
+ return cls(**cls_kwargs)
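Putting `register`, `merge_config`, and `create` together, a hypothetical sketch (the `TestModule` class and the config values are invented):

```python
# Hypothetical end-to-end use of the registry above.
from ppdet.core.workspace import register, create, merge_config


@register
class TestModule(object):
    __shared__ = ['num_classes']

    def __init__(self, num_classes=80, depth=50):
        self.num_classes = num_classes
        self.depth = depth


# num_classes is declared shared, so a global config value overrides its default
merge_config({'num_classes': 3, 'TestModule': {'depth': 101}})
m = create('TestModule')
print(m.num_classes, m.depth)  # 3 101
```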
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__init__.py
new file mode 100644
index 000000000..a12aa323e
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import source
+from . import transform
+from . import reader
+
+from .source import *
+from .transform import *
+from .reader import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..b366ebbaa
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__pycache__/reader.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__pycache__/reader.cpython-37.pyc
new file mode 100644
index 000000000..c7c369faf
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__pycache__/reader.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__pycache__/shm_utils.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__pycache__/shm_utils.cpython-37.pyc
new file mode 100644
index 000000000..63a13890f
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/__pycache__/shm_utils.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__init__.py
new file mode 100644
index 000000000..61d5aa213
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
\ No newline at end of file
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..8c07ff43d
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__pycache__/annotation_cropper.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__pycache__/annotation_cropper.cpython-37.pyc
new file mode 100644
index 000000000..31df2fcf5
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__pycache__/annotation_cropper.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__pycache__/chip_box_utils.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__pycache__/chip_box_utils.cpython-37.pyc
new file mode 100644
index 000000000..f24cf29d4
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/__pycache__/chip_box_utils.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/annotation_cropper.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/annotation_cropper.py
new file mode 100644
index 000000000..93a9a1f75
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/annotation_cropper.py
@@ -0,0 +1,542 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import math
+import random
+import numpy as np
+from copy import deepcopy
+from typing import List, Tuple
+from collections import defaultdict
+
+from .chip_box_utils import nms, transform_chip_boxes2image_boxes
+from .chip_box_utils import find_chips_to_cover_overlaped_boxes
+from .chip_box_utils import transform_chip_box
+from .chip_box_utils import intersection_over_box
+
+
+class AnnoCropper(object):
+ def __init__(self, image_target_sizes: List[int],
+ valid_box_ratio_ranges: List[List[float]],
+ chip_target_size: int, chip_target_stride: int,
+ use_neg_chip: bool = False,
+ max_neg_num_per_im: int = 8,
+ max_per_img: int = -1,
+                 nms_thresh: float = 0.5
+ ):
+ """
+        Generate chips by chip_target_size and chip_target_stride.
+        These two parameters work like kernel_size and stride in a CNN.
+
+        Each image has a raw size; after resizing it gets its target size.
+        The resizing scale = target_size / raw_size, and the same holds for
+        chips of the image.
+        box_ratio = box_raw_size / image_raw_size = box_target_size / image_target_size
+        The 'size' mentioned above is the length of the long side of the image, box or chip.
+
+ :param image_target_sizes: [2000, 1000]
+ :param valid_box_ratio_ranges: [[-1, 0.1],[0.08, -1]]
+ :param chip_target_size: 500
+ :param chip_target_stride: 200
+ """
+ self.target_sizes = image_target_sizes
+ self.valid_box_ratio_ranges = valid_box_ratio_ranges
+ assert len(self.target_sizes) == len(self.valid_box_ratio_ranges)
+ self.scale_num = len(self.target_sizes)
+ self.chip_target_size = chip_target_size # is target size
+ self.chip_target_stride = chip_target_stride # is target stride
+ self.use_neg_chip = use_neg_chip
+ self.max_neg_num_per_im = max_neg_num_per_im
+ self.max_per_img = max_per_img
+ self.nms_thresh = nms_thresh
+
+ def crop_anno_records(self, records: List[dict]):
+ """
+ The main logic:
+ # foreach record(image):
+ # foreach scale:
+ # 1 generate chips by chip size and stride for each scale
+ # 2 get pos chips
+ # - validate boxes: current scale; h,w >= 1
+ # - find pos chips greedily by valid gt boxes in each scale
+ # - for every valid gt box, find its corresponding pos chips in each scale
+ # 3 get neg chips
+        #    - if proposals are given, find neg boxes among them that are not covered by pos chips
+        #    - if neg boxes were found in the last step, find neg chips and assign neg boxes to them, as in step 2
+        # 4 sample neg chips if there are too many for one image
+        #   transform this image-scale annotation into chip (pos chip & neg chip) annotations
+
+        :param records: standard coco records, but with an extra key `proposals` (Px4)
+            predicted by the stage-1 model, which may contain neg boxes.
+ :return: new_records, list of dict like
+ {
+ 'im_file': 'fake_image1.jpg',
+ 'im_id': np.array([1]), # new _global_chip_id as im_id
+ 'h': h, # chip height
+ 'w': w, # chip width
+ 'is_crowd': is_crowd, # Nx1 -> Mx1
+ 'gt_class': gt_class, # Nx1 -> Mx1
+ 'gt_bbox': gt_bbox, # Nx4 -> Mx4, 4 represents [x1,y1,x2,y2]
+ 'gt_poly': gt_poly, # [None]xN -> [None]xM
+ 'chip': [x1, y1, x2, y2] # added
+ }
+
+ Attention:
+ ------------------------------>x
+ |
+ | (x1,y1)------
+ | | |
+ | | |
+ | | |
+ | | |
+ | | |
+ | ----------
+ | (x2,y2)
+ |
+ ↓
+ y
+
+        If we use [x1, y1, x2, y2] to represent boxes or chips,
+        (x1,y1) is the top-left point, which lies inside the box,
+        but (x2,y2) is the bottom-right point, which lies outside the box.
+        So x1 in [0, w-1], x2 in [1, w], y1 in [0, h-1], y2 in [1, h].
+        Use x2-x1 to get the width, and image[y1:y2, x1:x2] to crop the box area.
+ """
+
+ self.chip_records = []
+ self._global_chip_id = 1
+ for r in records:
+            self._cur_im_pos_chips = []  # element: (chip, boxes_idx), chip is [x1, y1, x2, y2], boxes_idx is List[int]
+ self._cur_im_neg_chips = [] # element: (chip, neg_box_num)
+ for scale_i in range(self.scale_num):
+ self._get_current_scale_parameters(scale_i, r)
+
+ # Cx4
+ chips = self._create_chips(r['h'], r['w'], self._cur_scale)
+
+                # dict: chipid->[box_id, ...]
+ pos_chip2boxes_idx = self._get_valid_boxes_and_pos_chips(r['gt_bbox'], chips)
+
+ # dict: chipid->neg_box_num
+ neg_chip2box_num = self._get_neg_boxes_and_chips(chips, list(pos_chip2boxes_idx.keys()), r.get('proposals', None))
+
+ self._add_to_cur_im_chips(chips, pos_chip2boxes_idx, neg_chip2box_num)
+
+ cur_image_records = self._trans_all_chips2annotations(r)
+ self.chip_records.extend(cur_image_records)
+ return self.chip_records
+
+ def _add_to_cur_im_chips(self, chips, pos_chip2boxes_idx, neg_chip2box_num):
+ for pos_chipid, boxes_idx in pos_chip2boxes_idx.items():
+ chip = np.array(chips[pos_chipid]) # copy chips slice
+ self._cur_im_pos_chips.append((chip, boxes_idx))
+
+ if neg_chip2box_num is None:
+ return
+
+ for neg_chipid, neg_box_num in neg_chip2box_num.items():
+ chip = np.array(chips[neg_chipid])
+ self._cur_im_neg_chips.append((chip, neg_box_num))
+
+ def _trans_all_chips2annotations(self, r):
+ gt_bbox = r['gt_bbox']
+ im_file = r['im_file']
+ is_crowd = r['is_crowd']
+ gt_class = r['gt_class']
+ # gt_poly = r['gt_poly'] # [None]xN
+ # remaining keys: im_id, h, w
+ chip_records = self._trans_pos_chips2annotations(im_file, gt_bbox, is_crowd, gt_class)
+
+ if not self.use_neg_chip:
+ return chip_records
+
+ sampled_neg_chips = self._sample_neg_chips()
+ neg_chip_records = self._trans_neg_chips2annotations(im_file, sampled_neg_chips)
+ chip_records.extend(neg_chip_records)
+ return chip_records
+
+ def _trans_pos_chips2annotations(self, im_file, gt_bbox, is_crowd, gt_class):
+ chip_records = []
+ for chip, boxes_idx in self._cur_im_pos_chips:
+ chip_bbox, final_boxes_idx = transform_chip_box(gt_bbox, boxes_idx, chip)
+ x1, y1, x2, y2 = chip
+ chip_h = y2 - y1
+ chip_w = x2 - x1
+ rec = {
+ 'im_file': im_file,
+ 'im_id': np.array([self._global_chip_id]),
+ 'h': chip_h,
+ 'w': chip_w,
+ 'gt_bbox': chip_bbox,
+ 'is_crowd': is_crowd[final_boxes_idx].copy(),
+ 'gt_class': gt_class[final_boxes_idx].copy(),
+ # 'gt_poly': [None] * len(final_boxes_idx),
+ 'chip': chip
+ }
+ self._global_chip_id += 1
+ chip_records.append(rec)
+ return chip_records
+
+ def _sample_neg_chips(self):
+ pos_num = len(self._cur_im_pos_chips)
+ neg_num = len(self._cur_im_neg_chips)
+ sample_num = min(pos_num + 2, self.max_neg_num_per_im)
+ assert sample_num >= 1
+ if neg_num <= sample_num:
+ return self._cur_im_neg_chips
+
+ candidate_num = int(sample_num * 1.5)
+ candidate_neg_chips = sorted(self._cur_im_neg_chips, key=lambda x: -x[1])[:candidate_num]
+ random.shuffle(candidate_neg_chips)
+ sampled_neg_chips = candidate_neg_chips[:sample_num]
+ return sampled_neg_chips
+
+ def _trans_neg_chips2annotations(self, im_file: str, sampled_neg_chips: List[Tuple]):
+ chip_records = []
+ for chip, neg_box_num in sampled_neg_chips:
+ x1, y1, x2, y2 = chip
+ chip_h = y2 - y1
+ chip_w = x2 - x1
+ rec = {
+ 'im_file': im_file,
+ 'im_id': np.array([self._global_chip_id]),
+ 'h': chip_h,
+ 'w': chip_w,
+ 'gt_bbox': np.zeros((0, 4), dtype=np.float32),
+ 'is_crowd': np.zeros((0, 1), dtype=np.int32),
+ 'gt_class': np.zeros((0, 1), dtype=np.int32),
+ # 'gt_poly': [],
+ 'chip': chip
+ }
+ self._global_chip_id += 1
+ chip_records.append(rec)
+ return chip_records
+
+ def _get_current_scale_parameters(self, scale_i, r):
+ im_size = max(r['h'], r['w'])
+ im_target_size = self.target_sizes[scale_i]
+ self._cur_im_size, self._cur_im_target_size = im_size, im_target_size
+ self._cur_scale = self._get_current_scale(im_target_size, im_size)
+ self._cur_valid_ratio_range = self.valid_box_ratio_ranges[scale_i]
+
+ def _get_current_scale(self, im_target_size, im_size):
+ return im_target_size / im_size
+
+ def _create_chips(self, h: int, w: int, scale: float):
+ """
+        Generate chips by chip_target_size and chip_target_stride.
+        These two parameters act like kernel_size and stride in a CNN.
+        :return: chips, Cx4, xy in raw-size coordinates
+ """
+ chip_size = self.chip_target_size # omit target for simplicity
+ stride = self.chip_target_stride
+ width = int(scale * w)
+ height = int(scale * h)
+ min_chip_location_diff = 20 # in target size
+
+ assert chip_size >= stride
+ chip_overlap = chip_size - stride
+        if (width - chip_overlap) % stride > min_chip_location_diff:  # the remainder not divisible by stride is large, so keep it
+ w_steps = max(1, int(math.ceil((width - chip_overlap) / stride)))
+        else:  # the remainder not divisible by stride is small, so drop it
+ w_steps = max(1, int(math.floor((width - chip_overlap) / stride)))
+ if (height - chip_overlap) % stride > min_chip_location_diff:
+ h_steps = max(1, int(math.ceil((height - chip_overlap) / stride)))
+ else:
+ h_steps = max(1, int(math.floor((height - chip_overlap) / stride)))
+
+ chips = list()
+ for j in range(h_steps):
+ for i in range(w_steps):
+ x1 = i * stride
+ y1 = j * stride
+ x2 = min(x1 + chip_size, width)
+ y2 = min(y1 + chip_size, height)
+ chips.append([x1, y1, x2, y2])
+
+ # check chip size
+ for item in chips:
+ if item[2] - item[0] > chip_size * 1.1 or item[3] - item[1] > chip_size * 1.1:
+ raise ValueError(item)
+        chips = np.array(chips, dtype=float)  # np.float is a removed alias for the builtin float
+
+ raw_size_chips = chips / scale
+ return raw_size_chips
+
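+    # Worked example for _create_chips above (numbers are illustrative
+    # assumptions): scale=0.5, w=2000 -> width=1000; chip_size=500, stride=200
+    # -> chip_overlap=300; (1000 - 300) % 200 = 100 > min_chip_location_diff,
+    # so w_steps = ceil(700 / 200) = 4; chip x1 values: 0, 200, 400, 600 and
+    # x2 = min(x1 + 500, 1000) -> 500, 700, 900, 1000; the chips are finally
+    # divided by scale to map back to raw-size coordinates.
+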
+ def _get_valid_boxes_and_pos_chips(self, gt_bbox, chips):
+ valid_ratio_range = self._cur_valid_ratio_range
+ im_size = self._cur_im_size
+ scale = self._cur_scale
+ # Nx4 N
+ valid_boxes, valid_boxes_idx = self._validate_boxes(valid_ratio_range, im_size, gt_bbox, scale)
+ # dict: chipid->[box_id, ...]
+ pos_chip2boxes_idx = self._find_pos_chips(chips, valid_boxes, valid_boxes_idx)
+ return pos_chip2boxes_idx
+
+ def _validate_boxes(self, valid_ratio_range: List[float],
+ im_size: int,
+ gt_boxes: 'np.array of Nx4',
+ scale: float):
+ """
+ :return: valid_boxes: Nx4, valid_boxes_idx: N
+ """
+ ws = (gt_boxes[:, 2] - gt_boxes[:, 0]).astype(np.int32)
+ hs = (gt_boxes[:, 3] - gt_boxes[:, 1]).astype(np.int32)
+ maxs = np.maximum(ws, hs)
+ box_ratio = maxs / im_size
+ mins = np.minimum(ws, hs)
+ target_mins = mins * scale
+
+ low = valid_ratio_range[0] if valid_ratio_range[0] > 0 else 0
+        high = valid_ratio_range[1] if valid_ratio_range[1] > 0 else np.finfo(float).max
+
+ valid_boxes_idx = np.nonzero((low <= box_ratio) & (box_ratio < high) & (target_mins >= 2))[0]
+ valid_boxes = gt_boxes[valid_boxes_idx]
+ return valid_boxes, valid_boxes_idx
+
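+    # Example of the ratio filter in _validate_boxes above (assumed values):
+    # valid_ratio_range=[-1, 0.1] gives low=0, high=0.1, keeping boxes whose
+    # long side is under 10% of the image's long side; [0.08, -1] gives
+    # low=0.08, high=inf, keeping boxes at or above 8%. Boxes whose short side
+    # would be under 2 pixels at the target scale are always dropped.
+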
+ def _find_pos_chips(self, chips: 'Cx4', valid_boxes: 'Bx4', valid_boxes_idx: 'B'):
+ """
+ :return: pos_chip2boxes_idx, dict: chipid->[box_id, ...]
+ """
+ iob = intersection_over_box(chips, valid_boxes) # overlap, CxB
+
+ iob_threshold_to_find_chips = 1.
+ pos_chip_ids, _ = self._find_chips_to_cover_overlaped_boxes(iob, iob_threshold_to_find_chips)
+ pos_chip_ids = set(pos_chip_ids)
+
+ iob_threshold_to_assign_box = 0.5
+ pos_chip2boxes_idx = self._assign_boxes_to_pos_chips(
+ iob, iob_threshold_to_assign_box, pos_chip_ids, valid_boxes_idx)
+ return pos_chip2boxes_idx
+
+ def _find_chips_to_cover_overlaped_boxes(self, iob, overlap_threshold):
+ return find_chips_to_cover_overlaped_boxes(iob, overlap_threshold)
+
+ def _assign_boxes_to_pos_chips(self, iob, overlap_threshold, pos_chip_ids, valid_boxes_idx):
+ chip_ids, box_ids = np.nonzero(iob >= overlap_threshold)
+ pos_chip2boxes_idx = defaultdict(list)
+ for chip_id, box_id in zip(chip_ids, box_ids):
+ if chip_id not in pos_chip_ids:
+ continue
+ raw_gt_box_idx = valid_boxes_idx[box_id]
+ pos_chip2boxes_idx[chip_id].append(raw_gt_box_idx)
+ return pos_chip2boxes_idx
+
+ def _get_neg_boxes_and_chips(self, chips: 'Cx4', pos_chip_ids: 'D', proposals: 'Px4'):
+ """
+ :param chips:
+ :param pos_chip_ids:
+ :param proposals:
+ :return: neg_chip2box_num, None or dict: chipid->neg_box_num
+ """
+ if not self.use_neg_chip:
+ return None
+
+        # training proposals may be None
+ if proposals is None or len(proposals) < 1:
+ return None
+
+ valid_ratio_range = self._cur_valid_ratio_range
+ im_size = self._cur_im_size
+ scale = self._cur_scale
+
+ valid_props, _ = self._validate_boxes(valid_ratio_range, im_size, proposals, scale)
+ neg_boxes = self._find_neg_boxes(chips, pos_chip_ids, valid_props)
+ neg_chip2box_num = self._find_neg_chips(chips, pos_chip_ids, neg_boxes)
+ return neg_chip2box_num
+
+ def _find_neg_boxes(self, chips: 'Cx4', pos_chip_ids: 'D', valid_props: 'Px4'):
+ """
+ :return: neg_boxes: Nx4
+ """
+ if len(pos_chip_ids) == 0:
+ return valid_props
+
+ pos_chips = chips[pos_chip_ids]
+ iob = intersection_over_box(pos_chips, valid_props)
+ overlap_per_prop = np.max(iob, axis=0)
+ non_overlap_props_idx = overlap_per_prop < 0.5
+ neg_boxes = valid_props[non_overlap_props_idx]
+ return neg_boxes
+
+ def _find_neg_chips(self, chips: 'Cx4', pos_chip_ids: 'D', neg_boxes: 'Nx4'):
+ """
+ :return: neg_chip2box_num, dict: chipid->neg_box_num
+ """
+ neg_chip_ids = np.setdiff1d(np.arange(len(chips)), pos_chip_ids)
+ neg_chips = chips[neg_chip_ids]
+
+ iob = intersection_over_box(neg_chips, neg_boxes)
+ iob_threshold_to_find_chips = 0.7
+ chosen_neg_chip_ids, chip_id2overlap_box_num = \
+ self._find_chips_to_cover_overlaped_boxes(iob, iob_threshold_to_find_chips)
+
+ neg_chipid2box_num = {}
+ for cid in chosen_neg_chip_ids:
+ box_num = chip_id2overlap_box_num[cid]
+ raw_chip_id = neg_chip_ids[cid]
+ neg_chipid2box_num[raw_chip_id] = box_num
+ return neg_chipid2box_num
+
+ def crop_infer_anno_records(self, records: List[dict]):
+ """
+        transform image records into chip records
+ :param records:
+ :return: new_records, list of dict like
+ {
+ 'im_file': 'fake_image1.jpg',
+ 'im_id': np.array([1]), # new _global_chip_id as im_id
+ 'h': h, # chip height
+ 'w': w, # chip width
+            'chip': [x1, y1, x2, y2],  # added
+            'ori_im_h': ori_im_h,  # added, original image height
+            'ori_im_w': ori_im_w,  # added, original image width
+            'scale_i': 0  # added
+ }
+ """
+ self.chip_records = []
+ self._global_chip_id = 1 # im_id start from 1
+ self._global_chip_id2img_id = {}
+
+ for r in records:
+ for scale_i in range(self.scale_num):
+ self._get_current_scale_parameters(scale_i, r)
+ # Cx4
+ chips = self._create_chips(r['h'], r['w'], self._cur_scale)
+ cur_img_chip_record = self._get_chips_records(r, chips, scale_i)
+ self.chip_records.extend(cur_img_chip_record)
+
+ return self.chip_records
+
+ def _get_chips_records(self, rec, chips, scale_i):
+ cur_img_chip_records = []
+ ori_im_h = rec["h"]
+ ori_im_w = rec["w"]
+ im_file = rec["im_file"]
+ ori_im_id = rec["im_id"]
+        for chip in chips:
+ chip_rec = {}
+ x1, y1, x2, y2 = chip
+ chip_h = y2 - y1
+ chip_w = x2 - x1
+ chip_rec["im_file"] = im_file
+ chip_rec["im_id"] = self._global_chip_id
+ chip_rec["h"] = chip_h
+ chip_rec["w"] = chip_w
+ chip_rec["chip"] = chip
+ chip_rec["ori_im_h"] = ori_im_h
+ chip_rec["ori_im_w"] = ori_im_w
+ chip_rec["scale_i"] = scale_i
+
+ self._global_chip_id2img_id[self._global_chip_id] = int(ori_im_id)
+ self._global_chip_id += 1
+ cur_img_chip_records.append(chip_rec)
+
+ return cur_img_chip_records
+
+ def aggregate_chips_detections(self, results, records=None):
+ """
+ # 1. transform chip dets to image dets
+ # 2. nms boxes per image;
+ # 3. format output results
+ :param results:
+        :param records: chip records, defaults to self.chip_records
+        :return: aggregated detection results per original image
+ """
+ results = deepcopy(results)
+ records = records if records else self.chip_records
+ img_id2bbox = self._transform_chip2image_bboxes(results, records)
+ nms_img_id2bbox = self._nms_dets(img_id2bbox)
+ aggregate_results = self._reformat_results(nms_img_id2bbox)
+ return aggregate_results
+
+ def _transform_chip2image_bboxes(self, results, records):
+        # 1. transform chip dets to image dets
+        # 2. filter dets by the valid box ratio range
+        # 3. reformat and aggregate chip dets per original image
+ img_id2bbox = defaultdict(list)
+ for result in results:
+ bbox_locs = result['bbox']
+ bbox_nums = result['bbox_num']
+ if len(bbox_locs) == 1 and bbox_locs[0][0] == -1: # current batch has no detections
+ # bbox_locs = array([[-1.]], dtype=float32); bbox_nums = [[1]]
+ # MultiClassNMS output: If there is no detected boxes for all images, lod will be set to {1} and Out only contains one value which is -1.
+ continue
+ im_ids = result['im_id'] # replace with range(len(bbox_nums))
+
+ last_bbox_num = 0
+ for idx, im_id in enumerate(im_ids):
+
+ cur_bbox_len = bbox_nums[idx]
+ bboxes = bbox_locs[last_bbox_num: last_bbox_num + cur_bbox_len]
+ last_bbox_num += cur_bbox_len
+ # box: [num_id, score, xmin, ymin, xmax, ymax]
+ if len(bboxes) == 0: # current image has no detections
+ continue
+
+ chip_rec = records[int(im_id) - 1] # im_id starts from 1, type is np.int64
+ image_size = max(chip_rec["ori_im_h"], chip_rec["ori_im_w"])
+
+ bboxes = transform_chip_boxes2image_boxes(bboxes, chip_rec["chip"], chip_rec["ori_im_h"], chip_rec["ori_im_w"])
+
+ scale_i = chip_rec["scale_i"]
+ cur_scale = self._get_current_scale(self.target_sizes[scale_i], image_size)
+ _, valid_boxes_idx = self._validate_boxes(self.valid_box_ratio_ranges[scale_i], image_size,
+ bboxes[:, 2:], cur_scale)
+ ori_img_id = self._global_chip_id2img_id[int(im_id)]
+
+ img_id2bbox[ori_img_id].append(bboxes[valid_boxes_idx])
+
+ return img_id2bbox
+
+ def _nms_dets(self, img_id2bbox):
+ # 1. NMS on each image-class
+ # 2. Limit number of detections to MAX_PER_IMAGE if requested
+ max_per_img = self.max_per_img
+ nms_thresh = self.nms_thresh
+
+ for img_id in img_id2bbox:
+ box = img_id2bbox[img_id] # list of np.array of shape [N, 6], 6 is [label, score, x1, y1, x2, y2]
+ box = np.concatenate(box, axis=0)
+ nms_dets = nms(box, nms_thresh)
+ if max_per_img > 0:
+ if len(nms_dets) > max_per_img:
+ keep = np.argsort(-nms_dets[:, 1])[:max_per_img]
+ nms_dets = nms_dets[keep]
+
+ img_id2bbox[img_id] = nms_dets
+
+ return img_id2bbox
+
+ def _reformat_results(self, img_id2bbox):
+ """reformat results"""
+ im_ids = img_id2bbox.keys()
+ results = []
+ for img_id in im_ids: # output by original im_id order
+ if len(img_id2bbox[img_id]) == 0:
+ bbox = np.array([[-1., 0., 0., 0., 0., 0.]]) # edge case: no detections
+ bbox_num = np.array([0])
+ else:
+ # np.array of shape [N, 6], 6 is [label, score, x1, y1, x2, y2]
+ bbox = img_id2bbox[img_id]
+ bbox_num = np.array([len(bbox)])
+ res = dict(
+ im_id=np.array([[img_id]]),
+ bbox=bbox,
+ bbox_num=bbox_num
+ )
+ results.append(res)
+ return results
+
+
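+# Usage sketch (illustrative only; the record contents and parameter values
+# below are assumptions, not a fixed API contract):
+#
+#   cropper = AnnoCropper(image_target_sizes=[2000, 1000],
+#                         valid_box_ratio_ranges=[[-1, 0.1], [0.08, -1]],
+#                         chip_target_size=500, chip_target_stride=200)
+#   chip_records = cropper.crop_anno_records(coco_records)         # training
+#   infer_records = cropper.crop_infer_anno_records(coco_records)  # inference
+#   # after running the detector on the chips:
+#   merged = cropper.aggregate_chips_detections(chip_results)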
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/chip_box_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/chip_box_utils.py
new file mode 100644
index 000000000..d6e81a165
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/crop_utils/chip_box_utils.py
@@ -0,0 +1,166 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+
+def bbox_area(boxes):
+ return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
+
+
+def intersection_over_box(chips, boxes):
+ """
+ intersection area over box area
+ :param chips: C
+ :param boxes: B
+ :return: iob, CxB
+ """
+ M = chips.shape[0]
+ N = boxes.shape[0]
+ if M * N == 0:
+ return np.zeros([M, N], dtype='float32')
+
+ box_area = bbox_area(boxes) # B
+
+ inter_x2y2 = np.minimum(np.expand_dims(chips, 1)[:, :, 2:], boxes[:, 2:]) # CxBX2
+ inter_x1y1 = np.maximum(np.expand_dims(chips, 1)[:, :, :2], boxes[:, :2]) # CxBx2
+ inter_wh = inter_x2y2 - inter_x1y1
+ inter_wh = np.clip(inter_wh, a_min=0, a_max=None)
+ inter_area = inter_wh[:, :, 0] * inter_wh[:, :, 1] # CxB
+
+ iob = inter_area / np.expand_dims(box_area, 0)
+ return iob
+
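+# Worked example for intersection_over_box above (illustrative values):
+#   chips = np.array([[0., 0., 10., 10.]]); boxes = np.array([[5., 5., 15., 15.]])
+#   the intersection is the 5x5 square [5, 5, 10, 10] -> area 25; box area 100,
+#   so intersection_over_box(chips, boxes) == [[0.25]]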
+
+def clip_boxes(boxes, im_shape):
+ """
+ Clip boxes to image boundaries.
+ :param boxes: [N, 4]
+ :param im_shape: tuple of 2, [h, w]
+ :return: [N, 4]
+ """
+ # x1 >= 0
+ boxes[:, 0] = np.clip(boxes[:, 0], 0, im_shape[1] - 1)
+ # y1 >= 0
+ boxes[:, 1] = np.clip(boxes[:, 1], 0, im_shape[0] - 1)
+ # x2 < im_shape[1]
+ boxes[:, 2] = np.clip(boxes[:, 2], 1, im_shape[1])
+ # y2 < im_shape[0]
+ boxes[:, 3] = np.clip(boxes[:, 3], 1, im_shape[0])
+ return boxes
+
+
+def transform_chip_box(gt_bbox: 'Gx4', boxes_idx: 'B', chip: '4'):
+ boxes_idx = np.array(boxes_idx)
+ cur_gt_bbox = gt_bbox[boxes_idx].copy() # Bx4
+ x1, y1, x2, y2 = chip
+ cur_gt_bbox[:, 0] -= x1
+ cur_gt_bbox[:, 1] -= y1
+ cur_gt_bbox[:, 2] -= x1
+ cur_gt_bbox[:, 3] -= y1
+ h = y2 - y1
+ w = x2 - x1
+ cur_gt_bbox = clip_boxes(cur_gt_bbox, (h, w))
+ ws = (cur_gt_bbox[:, 2] - cur_gt_bbox[:, 0]).astype(np.int32)
+ hs = (cur_gt_bbox[:, 3] - cur_gt_bbox[:, 1]).astype(np.int32)
+ valid_idx = (ws >= 2) & (hs >= 2)
+ return cur_gt_bbox[valid_idx], boxes_idx[valid_idx]
+
+
+def find_chips_to_cover_overlaped_boxes(iob, overlap_threshold):
+ chip_ids, box_ids = np.nonzero(iob >= overlap_threshold)
+ chip_id2overlap_box_num = np.bincount(chip_ids) # 1d array
+ chip_id2overlap_box_num = np.pad(chip_id2overlap_box_num, (0, len(iob) - len(chip_id2overlap_box_num)),
+ constant_values=0)
+
+ chosen_chip_ids = []
+ while len(box_ids) > 0:
+ value_counts = np.bincount(chip_ids) # 1d array
+ max_count_chip_id = np.argmax(value_counts)
+ assert max_count_chip_id not in chosen_chip_ids
+ chosen_chip_ids.append(max_count_chip_id)
+
+ box_ids_in_cur_chip = box_ids[chip_ids == max_count_chip_id]
+ ids_not_in_cur_boxes_mask = np.logical_not(np.isin(box_ids, box_ids_in_cur_chip))
+ chip_ids = chip_ids[ids_not_in_cur_boxes_mask]
+ box_ids = box_ids[ids_not_in_cur_boxes_mask]
+ return chosen_chip_ids, chip_id2overlap_box_num
+
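+# Note (reading of the loop above): this is a greedy set cover -- each pass
+# recounts how many still-uncovered boxes each chip overlaps, picks the chip
+# covering the most, removes those boxes, and repeats until none remain.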
+
+def transform_chip_boxes2image_boxes(chip_boxes, chip, img_h, img_w):
+ chip_boxes = np.array(sorted(chip_boxes, key=lambda item: -item[1]))
+ xmin, ymin, _, _ = chip
+ # Transform to origin image loc
+ chip_boxes[:, 2] += xmin
+ chip_boxes[:, 4] += xmin
+ chip_boxes[:, 3] += ymin
+ chip_boxes[:, 5] += ymin
+ chip_boxes = clip_boxes(chip_boxes, (img_h, img_w))
+ return chip_boxes
+
+
+def nms(dets, thresh):
+ """Apply classic DPM-style greedy NMS."""
+ if dets.shape[0] == 0:
+ return dets[[], :]
+ scores = dets[:, 1]
+ x1 = dets[:, 2]
+ y1 = dets[:, 3]
+ x2 = dets[:, 4]
+ y2 = dets[:, 5]
+
+ areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+ order = scores.argsort()[::-1]
+
+ ndets = dets.shape[0]
+    suppressed = np.zeros(ndets, dtype=int)  # np.int is a removed alias for the builtin int
+
+ # nominal indices
+ # _i, _j
+ # sorted indices
+ # i, j
+ # temp variables for box i's (the box currently under consideration)
+ # ix1, iy1, ix2, iy2, iarea
+
+ # variables for computing overlap with box j (lower scoring box)
+ # xx1, yy1, xx2, yy2
+ # w, h
+ # inter, ovr
+
+ for _i in range(ndets):
+ i = order[_i]
+ if suppressed[i] == 1:
+ continue
+ ix1 = x1[i]
+ iy1 = y1[i]
+ ix2 = x2[i]
+ iy2 = y2[i]
+ iarea = areas[i]
+ for _j in range(_i + 1, ndets):
+ j = order[_j]
+ if suppressed[j] == 1:
+ continue
+ xx1 = max(ix1, x1[j])
+ yy1 = max(iy1, y1[j])
+ xx2 = min(ix2, x2[j])
+ yy2 = min(iy2, y2[j])
+ w = max(0.0, xx2 - xx1 + 1)
+ h = max(0.0, yy2 - yy1 + 1)
+ inter = w * h
+ ovr = inter / (iarea + areas[j] - inter)
+ if ovr >= thresh:
+ suppressed[j] = 1
+ keep = np.where(suppressed == 0)[0]
+ dets = dets[keep, :]
+ return dets
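+
+# nms usage sketch (assumed det layout matching the column indexing above:
+# [label, score, x1, y1, x2, y2]):
+#   dets = np.array([[0, 0.9, 0, 0, 10, 10],
+#                    [0, 0.8, 1, 1, 11, 11],
+#                    [0, 0.7, 50, 50, 60, 60]], dtype=np.float32)
+#   kept = nms(dets, 0.5)  # the 0.8 box overlaps the 0.9 box with IoU ~0.70
+#                          # and is suppressed; the far-away 0.7 box is kept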
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/reader.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/reader.py
new file mode 100644
index 000000000..c9ea09af2
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/reader.py
@@ -0,0 +1,302 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import traceback
+import six
+import sys
+import numpy as np
+
+from paddle.io import DataLoader, DistributedBatchSampler
+from paddle.fluid.dataloader.collate import default_collate_fn
+
+from ppdet.core.workspace import register
+from . import transform
+from .shm_utils import _get_shared_memory_size_in_M
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger('reader')
+
+MAIN_PID = os.getpid()
+
+
+class Compose(object):
+ def __init__(self, transforms, num_classes=80):
+ self.transforms = transforms
+ self.transforms_cls = []
+ for t in self.transforms:
+ for k, v in t.items():
+ op_cls = getattr(transform, k)
+ f = op_cls(**v)
+ if hasattr(f, 'num_classes'):
+ f.num_classes = num_classes
+
+ self.transforms_cls.append(f)
+
+ def __call__(self, data):
+ for f in self.transforms_cls:
+ try:
+ data = f(data)
+ except Exception as e:
+ stack_info = traceback.format_exc()
+ logger.warning("fail to map sample transform [{}] "
+ "with error: {} and stack:\n{}".format(
+ f, e, str(stack_info)))
+ raise e
+
+ return data
+
+
+class BatchCompose(Compose):
+ def __init__(self, transforms, num_classes=80, collate_batch=True):
+ super(BatchCompose, self).__init__(transforms, num_classes)
+ self.collate_batch = collate_batch
+
+ def __call__(self, data):
+ for f in self.transforms_cls:
+ try:
+ data = f(data)
+ except Exception as e:
+ stack_info = traceback.format_exc()
+ logger.warning("fail to map batch transform [{}] "
+ "with error: {} and stack:\n{}".format(
+ f, e, str(stack_info)))
+ raise e
+
+        # remove keys which are not needed by the model
+ extra_key = ['h', 'w', 'flipped']
+ for k in extra_key:
+ for sample in data:
+ if k in sample:
+ sample.pop(k)
+
+        # collate batch data; if a user-defined batch function is
+        # needed, apply it here
+ if self.collate_batch:
+ batch_data = default_collate_fn(data)
+ else:
+ batch_data = {}
+ for k in data[0].keys():
+ tmp_data = []
+ for i in range(len(data)):
+ tmp_data.append(data[i][k])
+                if 'gt_' not in k and 'is_crowd' not in k and 'difficult' not in k:
+ tmp_data = np.stack(tmp_data, axis=0)
+ batch_data[k] = tmp_data
+ return batch_data
+
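+# Note on collate_batch=False (behaviour of BatchCompose above): image-like
+# fields are stacked into batched arrays, while 'gt_*', 'is_crowd' and
+# 'difficult' fields stay as per-sample lists, since their lengths can differ.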
+
+class BaseDataLoader(object):
+ """
+ Base DataLoader implementation for detection models
+
+ Args:
+ sample_transforms (list): a list of transforms to perform
+ on each sample
+ batch_transforms (list): a list of transforms to perform
+ on batch
+ batch_size (int): batch size for batch collating, default 1.
+ shuffle (bool): whether to shuffle samples
+        drop_last (bool): whether to drop the last incomplete batch,
+            default False
+        num_classes (int): class number of dataset, default 80
+        collate_batch (bool): whether to collate batch in dataloader.
+            If set to True, the samples will be collated into batches
+            according to the batch size. Otherwise, the ground truth will
+            not be collated, which is used when the number of ground
+            truths differs across samples.
+        use_shared_memory (bool): whether to use shared memory to
+            accelerate data loading, enable this only if you
+            are sure that the shared memory size of your OS
+            is larger than the memory cost of the model's input data.
+ Note that shared memory will be automatically
+ disabled if the shared memory of OS is less than
+ 1G, which is not enough for detection models.
+ Default False.
+ """
+
+ def __init__(self,
+ sample_transforms=[],
+ batch_transforms=[],
+ batch_size=1,
+ shuffle=False,
+ drop_last=False,
+ num_classes=80,
+ collate_batch=True,
+ use_shared_memory=False,
+ **kwargs):
+ # sample transform
+ self._sample_transforms = Compose(
+ sample_transforms, num_classes=num_classes)
+
+        # batch transform
+ self._batch_transforms = BatchCompose(batch_transforms, num_classes,
+ collate_batch)
+ self.batch_size = batch_size
+ self.shuffle = shuffle
+ self.drop_last = drop_last
+ self.use_shared_memory = use_shared_memory
+ self.kwargs = kwargs
+
+ def __call__(self,
+ dataset,
+ worker_num,
+ batch_sampler=None,
+ return_list=False):
+ self.dataset = dataset
+ self.dataset.check_or_download_dataset()
+ self.dataset.parse_dataset()
+ # get data
+ self.dataset.set_transform(self._sample_transforms)
+ # set kwargs
+ self.dataset.set_kwargs(**self.kwargs)
+ # batch sampler
+ if batch_sampler is None:
+ self._batch_sampler = DistributedBatchSampler(
+ self.dataset,
+ batch_size=self.batch_size,
+ shuffle=self.shuffle,
+ drop_last=self.drop_last)
+ else:
+ self._batch_sampler = batch_sampler
+
+        # DataLoader does not start sub-processes on Windows and macOS,
+        # so shared memory is not needed there
+ use_shared_memory = self.use_shared_memory and \
+ sys.platform not in ['win32', 'darwin']
+        # check whether the shared memory size is larger than 1G (1024M)
+ if use_shared_memory:
+ shm_size = _get_shared_memory_size_in_M()
+ if shm_size is not None and shm_size < 1024.:
+ logger.warning("Shared memory size is less than 1G, "
+ "disable shared_memory in DataLoader")
+ use_shared_memory = False
+
+ self.dataloader = DataLoader(
+ dataset=self.dataset,
+ batch_sampler=self._batch_sampler,
+ collate_fn=self._batch_transforms,
+ num_workers=worker_num,
+ return_list=return_list,
+ use_shared_memory=use_shared_memory)
+ self.loader = iter(self.dataloader)
+
+ return self
+
+ def __len__(self):
+ return len(self._batch_sampler)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ try:
+ return next(self.loader)
+ except StopIteration:
+ self.loader = iter(self.dataloader)
+ six.reraise(*sys.exc_info())
+
+ def next(self):
+ # python2 compatibility
+ return self.__next__()
+
+
+@register
+class TrainReader(BaseDataLoader):
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ sample_transforms=[],
+ batch_transforms=[],
+ batch_size=1,
+ shuffle=True,
+ drop_last=True,
+ num_classes=80,
+ collate_batch=True,
+ **kwargs):
+ super(TrainReader, self).__init__(sample_transforms, batch_transforms,
+ batch_size, shuffle, drop_last,
+ num_classes, collate_batch, **kwargs)
+
+
+@register
+class EvalReader(BaseDataLoader):
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ sample_transforms=[],
+ batch_transforms=[],
+ batch_size=1,
+ shuffle=False,
+ drop_last=True,
+ num_classes=80,
+ **kwargs):
+ super(EvalReader, self).__init__(sample_transforms, batch_transforms,
+ batch_size, shuffle, drop_last,
+ num_classes, **kwargs)
+
+
+@register
+class TestReader(BaseDataLoader):
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ sample_transforms=[],
+ batch_transforms=[],
+ batch_size=1,
+ shuffle=False,
+ drop_last=False,
+ num_classes=80,
+ **kwargs):
+ super(TestReader, self).__init__(sample_transforms, batch_transforms,
+ batch_size, shuffle, drop_last,
+ num_classes, **kwargs)
+
+
+@register
+class EvalMOTReader(BaseDataLoader):
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ sample_transforms=[],
+ batch_transforms=[],
+ batch_size=1,
+ shuffle=False,
+ drop_last=False,
+ num_classes=1,
+ **kwargs):
+ super(EvalMOTReader, self).__init__(sample_transforms, batch_transforms,
+ batch_size, shuffle, drop_last,
+ num_classes, **kwargs)
+
+
+@register
+class TestMOTReader(BaseDataLoader):
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ sample_transforms=[],
+ batch_transforms=[],
+ batch_size=1,
+ shuffle=False,
+ drop_last=False,
+ num_classes=1,
+ **kwargs):
+ super(TestMOTReader, self).__init__(sample_transforms, batch_transforms,
+ batch_size, shuffle, drop_last,
+ num_classes, **kwargs)
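+
+# Usage sketch (illustrative; the dataset object and transform names are
+# assumptions -- in practice these readers are usually built from YAML
+# configs through ppdet.core.workspace):
+#   loader = TrainReader(sample_transforms=[{'Decode': {}}],
+#                        batch_transforms=[], batch_size=2, shuffle=True)
+#   loader(dataset, worker_num=2)
+#   for batch in loader:
+#       ...  # feed the batch to the model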
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/shm_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/shm_utils.py
new file mode 100644
index 000000000..38d8ba66c
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/shm_utils.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+SIZE_UNIT = ['K', 'M', 'G', 'T']
+SHM_QUERY_CMD = 'df -h'
+SHM_KEY = 'shm'
+SHM_DEFAULT_MOUNT = '/dev/shm'
+
+# [ shared memory size check ]
+# In detection models, image/target data occupies a lot of memory, and
+# will occupy lots of shared memory in multi-process DataLoader, we use
+# following code to get shared memory size and perform a size check to
+# disable shared memory use if shared memory size is not enough.
+# Shared memory getting process as follows:
+# 1. use `df -h` to get all mount info
+# 2. pick up spaces whose mount info contains 'shm'
+# 3. if there is only one 'shm' space, return its size
+# 4. if there are multiple 'shm' spaces, try to find the default mount
+#    directory '/dev/shm' used on Linux-like systems, otherwise return
+#    the biggest space size.
+
+
+def _parse_size_in_M(size_str):
+ num, unit = size_str[:-1], size_str[-1]
+ assert unit in SIZE_UNIT, \
+ "unknown shm size unit {}".format(unit)
+ return float(num) * \
+ (1024 ** (SIZE_UNIT.index(unit) - 1))
+
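+# Examples (values assumed, matching the `df -h` size column parsed below):
+#   _parse_size_in_M('512M') -> 512.0
+#   _parse_size_in_M('64G')  -> 64 * 1024 = 65536.0
+#   _parse_size_in_M('100K') -> 100 / 1024 ~= 0.098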
+
+def _get_shared_memory_size_in_M():
+ try:
+ df_infos = os.popen(SHM_QUERY_CMD).readlines()
+    except Exception:
+ return None
+ else:
+ shm_infos = []
+ for df_info in df_infos:
+ info = df_info.strip()
+ if info.find(SHM_KEY) >= 0:
+ shm_infos.append(info.split())
+
+ if len(shm_infos) == 0:
+ return None
+ elif len(shm_infos) == 1:
+ return _parse_size_in_M(shm_infos[0][3])
+ else:
+ default_mount_infos = [
+ si for si in shm_infos if si[-1] == SHM_DEFAULT_MOUNT
+ ]
+ if default_mount_infos:
+ return _parse_size_in_M(default_mount_infos[0][3])
+ else:
+ return max([_parse_size_in_M(si[3]) for si in shm_infos])
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__init__.py
new file mode 100644
index 000000000..3854d3d25
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__init__.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import coco
+from . import voc
+from . import widerface
+from . import category
+from . import keypoint_coco
+from . import mot
+from . import sniper_coco
+
+from .coco import *
+from .voc import *
+from .widerface import *
+from .category import *
+from .keypoint_coco import *
+from .mot import *
+from .sniper_coco import SniperCOCODataSet
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..ba53aeca6
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/category.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/category.cpython-37.pyc
new file mode 100644
index 000000000..b482ceca7
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/category.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/coco.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/coco.cpython-37.pyc
new file mode 100644
index 000000000..41c30d1af
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/coco.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/dataset.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/dataset.cpython-37.pyc
new file mode 100644
index 000000000..5111b072a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/dataset.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/keypoint_coco.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/keypoint_coco.cpython-37.pyc
new file mode 100644
index 000000000..86a391729
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/keypoint_coco.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/mot.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/mot.cpython-37.pyc
new file mode 100644
index 000000000..6a3f03ffc
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/mot.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/sniper_coco.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/sniper_coco.cpython-37.pyc
new file mode 100644
index 000000000..aecd87e2f
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/sniper_coco.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/voc.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/voc.cpython-37.pyc
new file mode 100644
index 000000000..f028fb109
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/voc.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/widerface.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/widerface.cpython-37.pyc
new file mode 100644
index 000000000..b402cc0cf
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/__pycache__/widerface.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/category.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/category.py
new file mode 100644
index 000000000..9390e54c4
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/category.py
@@ -0,0 +1,904 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+from ppdet.data.source.voc import pascalvoc_label
+from ppdet.data.source.widerface import widerface_label
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = ['get_categories']
+
+
+def get_categories(metric_type, anno_file=None, arch=None):
+ """
+ Get class id to category id map and category id
+ to category name map from annotation file.
+
+ Args:
+        metric_type (str): metric type, currently supports 'coco', 'voc', 'oid'
+ and 'widerface'.
+ anno_file (str): annotation file path
+ """
+ if arch == 'keypoint_arch':
+ return (None, {'id': 'keypoint'})
+
+ if metric_type.lower() == 'coco' or metric_type.lower(
+ ) == 'rbox' or metric_type.lower() == 'snipercoco':
+ if anno_file and os.path.isfile(anno_file):
+ # lazy import pycocotools here
+ from pycocotools.coco import COCO
+
+ coco = COCO(anno_file)
+ cats = coco.loadCats(coco.getCatIds())
+
+ clsid2catid = {i: cat['id'] for i, cat in enumerate(cats)}
+ catid2name = {cat['id']: cat['name'] for cat in cats}
+ return clsid2catid, catid2name
+
+        # anno file does not exist, load default categories of COCO17
+ else:
+ if metric_type.lower() == 'rbox':
+ return _dota_category()
+
+ return _coco17_category()
+
+ elif metric_type.lower() == 'voc':
+ if anno_file and os.path.isfile(anno_file):
+ cats = []
+ with open(anno_file) as f:
+ for line in f.readlines():
+ cats.append(line.strip())
+
+ if cats[0] == 'background':
+ cats = cats[1:]
+
+ clsid2catid = {i: i for i in range(len(cats))}
+ catid2name = {i: name for i, name in enumerate(cats)}
+
+ return clsid2catid, catid2name
+
+        # anno file does not exist, load default categories of
+        # VOC (all 20 categories)
+ else:
+ return _vocall_category()
+
+ elif metric_type.lower() == 'oid':
+ if anno_file and os.path.isfile(anno_file):
+            logger.warning("only default categories are supported for OID19")
+ return _oid19_category()
+
+ elif metric_type.lower() == 'widerface':
+ return _widerface_category()
+
+ elif metric_type.lower() == 'keypointtopdowncocoeval' or metric_type.lower(
+ ) == 'keypointtopdownmpiieval':
+ return (None, {'id': 'keypoint'})
+
+ elif metric_type.lower() in ['mot', 'motdet', 'reid']:
+ if anno_file and os.path.isfile(anno_file):
+ cats = []
+ with open(anno_file) as f:
+ for line in f.readlines():
+ cats.append(line.strip())
+ if cats[0] == 'background':
+ cats = cats[1:]
+ clsid2catid = {i: i for i in range(len(cats))}
+ catid2name = {i: name for i, name in enumerate(cats)}
+ return clsid2catid, catid2name
+        # anno file does not exist, load default category 'pedestrian'.
+ else:
+ return _mot_category(category='pedestrian')
+
+ elif metric_type.lower() in ['kitti', 'bdd100kmot']:
+ return _mot_category(category='vehicle')
+
+ elif metric_type.lower() in ['mcmot']:
+ if anno_file and os.path.isfile(anno_file):
+ cats = []
+ with open(anno_file) as f:
+ for line in f.readlines():
+ cats.append(line.strip())
+ if cats[0] == 'background':
+ cats = cats[1:]
+ clsid2catid = {i: i for i in range(len(cats))}
+ catid2name = {i: name for i, name in enumerate(cats)}
+ return clsid2catid, catid2name
+        # anno file does not exist, load default categories of VisDrone (all 10 categories)
+ else:
+ return _visdrone_category()
+
+ else:
+ raise ValueError("unknown metric type {}".format(metric_type))
+
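+# Usage sketch (illustrative; the annotation path is an assumption):
+#   clsid2catid, catid2name = get_categories(
+#       'COCO', anno_file='annotations/instances_val2017.json')
+#   # clsid2catid: contiguous training class id -> dataset category id
+#   # catid2name: dataset category id -> human-readable category name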
+
+def _mot_category(category='pedestrian'):
+ """
+ Get class id to category id map and category id
+ to category name map of mot dataset
+ """
+ label_map = {category: 0}
+ label_map = sorted(label_map.items(), key=lambda x: x[1])
+ cats = [l[0] for l in label_map]
+
+ clsid2catid = {i: i for i in range(len(cats))}
+ catid2name = {i: name for i, name in enumerate(cats)}
+
+ return clsid2catid, catid2name
+
+
+def _coco17_category():
+ """
+ Get class id to category id map and category id
+ to category name map of COCO2017 dataset
+
+ """
+ clsid2catid = {
+ 1: 1,
+ 2: 2,
+ 3: 3,
+ 4: 4,
+ 5: 5,
+ 6: 6,
+ 7: 7,
+ 8: 8,
+ 9: 9,
+ 10: 10,
+ 11: 11,
+ 12: 13,
+ 13: 14,
+ 14: 15,
+ 15: 16,
+ 16: 17,
+ 17: 18,
+ 18: 19,
+ 19: 20,
+ 20: 21,
+ 21: 22,
+ 22: 23,
+ 23: 24,
+ 24: 25,
+ 25: 27,
+ 26: 28,
+ 27: 31,
+ 28: 32,
+ 29: 33,
+ 30: 34,
+ 31: 35,
+ 32: 36,
+ 33: 37,
+ 34: 38,
+ 35: 39,
+ 36: 40,
+ 37: 41,
+ 38: 42,
+ 39: 43,
+ 40: 44,
+ 41: 46,
+ 42: 47,
+ 43: 48,
+ 44: 49,
+ 45: 50,
+ 46: 51,
+ 47: 52,
+ 48: 53,
+ 49: 54,
+ 50: 55,
+ 51: 56,
+ 52: 57,
+ 53: 58,
+ 54: 59,
+ 55: 60,
+ 56: 61,
+ 57: 62,
+ 58: 63,
+ 59: 64,
+ 60: 65,
+ 61: 67,
+ 62: 70,
+ 63: 72,
+ 64: 73,
+ 65: 74,
+ 66: 75,
+ 67: 76,
+ 68: 77,
+ 69: 78,
+ 70: 79,
+ 71: 80,
+ 72: 81,
+ 73: 82,
+ 74: 84,
+ 75: 85,
+ 76: 86,
+ 77: 87,
+ 78: 88,
+ 79: 89,
+ 80: 90
+ }
+
+ catid2name = {
+ 0: 'background',
+ 1: 'person',
+ 2: 'bicycle',
+ 3: 'car',
+ 4: 'motorcycle',
+ 5: 'airplane',
+ 6: 'bus',
+ 7: 'train',
+ 8: 'truck',
+ 9: 'boat',
+ 10: 'traffic light',
+ 11: 'fire hydrant',
+ 13: 'stop sign',
+ 14: 'parking meter',
+ 15: 'bench',
+ 16: 'bird',
+ 17: 'cat',
+ 18: 'dog',
+ 19: 'horse',
+ 20: 'sheep',
+ 21: 'cow',
+ 22: 'elephant',
+ 23: 'bear',
+ 24: 'zebra',
+ 25: 'giraffe',
+ 27: 'backpack',
+ 28: 'umbrella',
+ 31: 'handbag',
+ 32: 'tie',
+ 33: 'suitcase',
+ 34: 'frisbee',
+ 35: 'skis',
+ 36: 'snowboard',
+ 37: 'sports ball',
+ 38: 'kite',
+ 39: 'baseball bat',
+ 40: 'baseball glove',
+ 41: 'skateboard',
+ 42: 'surfboard',
+ 43: 'tennis racket',
+ 44: 'bottle',
+ 46: 'wine glass',
+ 47: 'cup',
+ 48: 'fork',
+ 49: 'knife',
+ 50: 'spoon',
+ 51: 'bowl',
+ 52: 'banana',
+ 53: 'apple',
+ 54: 'sandwich',
+ 55: 'orange',
+ 56: 'broccoli',
+ 57: 'carrot',
+ 58: 'hot dog',
+ 59: 'pizza',
+ 60: 'donut',
+ 61: 'cake',
+ 62: 'chair',
+ 63: 'couch',
+ 64: 'potted plant',
+ 65: 'bed',
+ 67: 'dining table',
+ 70: 'toilet',
+ 72: 'tv',
+ 73: 'laptop',
+ 74: 'mouse',
+ 75: 'remote',
+ 76: 'keyboard',
+ 77: 'cell phone',
+ 78: 'microwave',
+ 79: 'oven',
+ 80: 'toaster',
+ 81: 'sink',
+ 82: 'refrigerator',
+ 84: 'book',
+ 85: 'clock',
+ 86: 'vase',
+ 87: 'scissors',
+ 88: 'teddy bear',
+ 89: 'hair drier',
+ 90: 'toothbrush'
+ }
+
+ clsid2catid = {k - 1: v for k, v in clsid2catid.items()}
+ catid2name.pop(0)
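+    # After the shift above, training class ids run 0..79 while the COCO
+    # category ids keep their original gaps (12, 26, 29, 30, ... are unused),
+    # e.g. clsid2catid[11] == 13 ('stop sign').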
+
+ return clsid2catid, catid2name
+
+
+def _dota_category():
+ """
+ Get class id to category id map and category id
+ to category name map of dota dataset
+ """
+ catid2name = {
+ 0: 'background',
+ 1: 'plane',
+ 2: 'baseball-diamond',
+ 3: 'bridge',
+ 4: 'ground-track-field',
+ 5: 'small-vehicle',
+ 6: 'large-vehicle',
+ 7: 'ship',
+ 8: 'tennis-court',
+ 9: 'basketball-court',
+ 10: 'storage-tank',
+ 11: 'soccer-ball-field',
+ 12: 'roundabout',
+ 13: 'harbor',
+ 14: 'swimming-pool',
+ 15: 'helicopter'
+ }
+ catid2name.pop(0)
+ clsid2catid = {i: i + 1 for i in range(len(catid2name))}
+ return clsid2catid, catid2name
+
+
+def _vocall_category():
+ """
+ Get class id to category id map and category id
+ to category name map of mixup voc dataset
+
+ """
+ label_map = pascalvoc_label()
+ label_map = sorted(label_map.items(), key=lambda x: x[1])
+ cats = [l[0] for l in label_map]
+
+ clsid2catid = {i: i for i in range(len(cats))}
+ catid2name = {i: name for i, name in enumerate(cats)}
+
+ return clsid2catid, catid2name
+
+
+def _widerface_category():
+ label_map = widerface_label()
+ label_map = sorted(label_map.items(), key=lambda x: x[1])
+ cats = [l[0] for l in label_map]
+ clsid2catid = {i: i for i in range(len(cats))}
+ catid2name = {i: name for i, name in enumerate(cats)}
+
+ return clsid2catid, catid2name
+
+
+def _oid19_category():
+ clsid2catid = {k: k + 1 for k in range(500)}
+
+ catid2name = {
+ 0: "background",
+ 1: "Infant bed",
+ 2: "Rose",
+ 3: "Flag",
+ 4: "Flashlight",
+ 5: "Sea turtle",
+ 6: "Camera",
+ 7: "Animal",
+ 8: "Glove",
+ 9: "Crocodile",
+ 10: "Cattle",
+ 11: "House",
+ 12: "Guacamole",
+ 13: "Penguin",
+ 14: "Vehicle registration plate",
+ 15: "Bench",
+ 16: "Ladybug",
+ 17: "Human nose",
+ 18: "Watermelon",
+ 19: "Flute",
+ 20: "Butterfly",
+ 21: "Washing machine",
+ 22: "Raccoon",
+ 23: "Segway",
+ 24: "Taco",
+ 25: "Jellyfish",
+ 26: "Cake",
+ 27: "Pen",
+ 28: "Cannon",
+ 29: "Bread",
+ 30: "Tree",
+ 31: "Shellfish",
+ 32: "Bed",
+ 33: "Hamster",
+ 34: "Hat",
+ 35: "Toaster",
+ 36: "Sombrero",
+ 37: "Tiara",
+ 38: "Bowl",
+ 39: "Dragonfly",
+ 40: "Moths and butterflies",
+ 41: "Antelope",
+ 42: "Vegetable",
+ 43: "Torch",
+ 44: "Building",
+ 45: "Power plugs and sockets",
+ 46: "Blender",
+ 47: "Billiard table",
+ 48: "Cutting board",
+ 49: "Bronze sculpture",
+ 50: "Turtle",
+ 51: "Broccoli",
+ 52: "Tiger",
+ 53: "Mirror",
+ 54: "Bear",
+ 55: "Zucchini",
+ 56: "Dress",
+ 57: "Volleyball",
+ 58: "Guitar",
+ 59: "Reptile",
+ 60: "Golf cart",
+ 61: "Tart",
+ 62: "Fedora",
+ 63: "Carnivore",
+ 64: "Car",
+ 65: "Lighthouse",
+ 66: "Coffeemaker",
+ 67: "Food processor",
+ 68: "Truck",
+ 69: "Bookcase",
+ 70: "Surfboard",
+ 71: "Footwear",
+ 72: "Bench",
+ 73: "Necklace",
+ 74: "Flower",
+ 75: "Radish",
+ 76: "Marine mammal",
+ 77: "Frying pan",
+ 78: "Tap",
+ 79: "Peach",
+ 80: "Knife",
+ 81: "Handbag",
+ 82: "Laptop",
+ 83: "Tent",
+ 84: "Ambulance",
+ 85: "Christmas tree",
+ 86: "Eagle",
+ 87: "Limousine",
+ 88: "Kitchen & dining room table",
+ 89: "Polar bear",
+ 90: "Tower",
+ 91: "Football",
+ 92: "Willow",
+ 93: "Human head",
+ 94: "Stop sign",
+ 95: "Banana",
+ 96: "Mixer",
+ 97: "Binoculars",
+ 98: "Dessert",
+ 99: "Bee",
+ 100: "Chair",
+ 101: "Wood-burning stove",
+ 102: "Flowerpot",
+ 103: "Beaker",
+ 104: "Oyster",
+ 105: "Woodpecker",
+ 106: "Harp",
+ 107: "Bathtub",
+ 108: "Wall clock",
+ 109: "Sports uniform",
+ 110: "Rhinoceros",
+ 111: "Beehive",
+ 112: "Cupboard",
+ 113: "Chicken",
+ 114: "Man",
+ 115: "Blue jay",
+ 116: "Cucumber",
+ 117: "Balloon",
+ 118: "Kite",
+ 119: "Fireplace",
+ 120: "Lantern",
+ 121: "Missile",
+ 122: "Book",
+ 123: "Spoon",
+ 124: "Grapefruit",
+ 125: "Squirrel",
+ 126: "Orange",
+ 127: "Coat",
+ 128: "Punching bag",
+ 129: "Zebra",
+ 130: "Billboard",
+ 131: "Bicycle",
+ 132: "Door handle",
+ 133: "Mechanical fan",
+ 134: "Ring binder",
+ 135: "Table",
+ 136: "Parrot",
+ 137: "Sock",
+ 138: "Vase",
+ 139: "Weapon",
+ 140: "Shotgun",
+ 141: "Glasses",
+ 142: "Seahorse",
+ 143: "Belt",
+ 144: "Watercraft",
+ 145: "Window",
+ 146: "Giraffe",
+ 147: "Lion",
+ 148: "Tire",
+ 149: "Vehicle",
+ 150: "Canoe",
+ 151: "Tie",
+ 152: "Shelf",
+ 153: "Picture frame",
+ 154: "Printer",
+ 155: "Human leg",
+ 156: "Boat",
+ 157: "Slow cooker",
+ 158: "Croissant",
+ 159: "Candle",
+ 160: "Pancake",
+ 161: "Pillow",
+ 162: "Coin",
+ 163: "Stretcher",
+ 164: "Sandal",
+ 165: "Woman",
+ 166: "Stairs",
+ 167: "Harpsichord",
+ 168: "Stool",
+ 169: "Bus",
+ 170: "Suitcase",
+ 171: "Human mouth",
+ 172: "Juice",
+ 173: "Skull",
+ 174: "Door",
+ 175: "Violin",
+ 176: "Chopsticks",
+ 177: "Digital clock",
+ 178: "Sunflower",
+ 179: "Leopard",
+ 180: "Bell pepper",
+ 181: "Harbor seal",
+ 182: "Snake",
+ 183: "Sewing machine",
+ 184: "Goose",
+ 185: "Helicopter",
+ 186: "Seat belt",
+ 187: "Coffee cup",
+ 188: "Microwave oven",
+ 189: "Hot dog",
+ 190: "Countertop",
+ 191: "Serving tray",
+ 192: "Dog bed",
+ 193: "Beer",
+ 194: "Sunglasses",
+ 195: "Golf ball",
+ 196: "Waffle",
+ 197: "Palm tree",
+ 198: "Trumpet",
+ 199: "Ruler",
+ 200: "Helmet",
+ 201: "Ladder",
+ 202: "Office building",
+ 203: "Tablet computer",
+ 204: "Toilet paper",
+ 205: "Pomegranate",
+ 206: "Skirt",
+ 207: "Gas stove",
+ 208: "Cookie",
+ 209: "Cart",
+ 210: "Raven",
+ 211: "Egg",
+ 212: "Burrito",
+ 213: "Goat",
+ 214: "Kitchen knife",
+ 215: "Skateboard",
+ 216: "Salt and pepper shakers",
+ 217: "Lynx",
+ 218: "Boot",
+ 219: "Platter",
+ 220: "Ski",
+ 221: "Swimwear",
+ 222: "Swimming pool",
+ 223: "Drinking straw",
+ 224: "Wrench",
+ 225: "Drum",
+ 226: "Ant",
+ 227: "Human ear",
+ 228: "Headphones",
+ 229: "Fountain",
+ 230: "Bird",
+ 231: "Jeans",
+ 232: "Television",
+ 233: "Crab",
+ 234: "Microphone",
+ 235: "Home appliance",
+ 236: "Snowplow",
+ 237: "Beetle",
+ 238: "Artichoke",
+ 239: "Jet ski",
+ 240: "Stationary bicycle",
+ 241: "Human hair",
+ 242: "Brown bear",
+ 243: "Starfish",
+ 244: "Fork",
+ 245: "Lobster",
+ 246: "Corded phone",
+ 247: "Drink",
+ 248: "Saucer",
+ 249: "Carrot",
+ 250: "Insect",
+ 251: "Clock",
+ 252: "Castle",
+ 253: "Tennis racket",
+ 254: "Ceiling fan",
+ 255: "Asparagus",
+ 256: "Jaguar",
+ 257: "Musical instrument",
+ 258: "Train",
+ 259: "Cat",
+ 260: "Rifle",
+ 261: "Dumbbell",
+ 262: "Mobile phone",
+ 263: "Taxi",
+ 264: "Shower",
+ 265: "Pitcher",
+ 266: "Lemon",
+ 267: "Invertebrate",
+ 268: "Turkey",
+ 269: "High heels",
+ 270: "Bust",
+ 271: "Elephant",
+ 272: "Scarf",
+ 273: "Barrel",
+ 274: "Trombone",
+ 275: "Pumpkin",
+ 276: "Box",
+ 277: "Tomato",
+ 278: "Frog",
+ 279: "Bidet",
+ 280: "Human face",
+ 281: "Houseplant",
+ 282: "Van",
+ 283: "Shark",
+ 284: "Ice cream",
+ 285: "Swim cap",
+ 286: "Falcon",
+ 287: "Ostrich",
+ 288: "Handgun",
+ 289: "Whiteboard",
+ 290: "Lizard",
+ 291: "Pasta",
+ 292: "Snowmobile",
+ 293: "Light bulb",
+ 294: "Window blind",
+ 295: "Muffin",
+ 296: "Pretzel",
+ 297: "Computer monitor",
+ 298: "Horn",
+ 299: "Furniture",
+ 300: "Sandwich",
+ 301: "Fox",
+ 302: "Convenience store",
+ 303: "Fish",
+ 304: "Fruit",
+ 305: "Earrings",
+ 306: "Curtain",
+ 307: "Grape",
+ 308: "Sofa bed",
+ 309: "Horse",
+ 310: "Luggage and bags",
+ 311: "Desk",
+ 312: "Crutch",
+ 313: "Bicycle helmet",
+ 314: "Tick",
+ 315: "Airplane",
+ 316: "Canary",
+ 317: "Spatula",
+ 318: "Watch",
+ 319: "Lily",
+ 320: "Kitchen appliance",
+ 321: "Filing cabinet",
+ 322: "Aircraft",
+ 323: "Cake stand",
+ 324: "Candy",
+ 325: "Sink",
+ 326: "Mouse",
+ 327: "Wine",
+ 328: "Wheelchair",
+ 329: "Goldfish",
+ 330: "Refrigerator",
+ 331: "French fries",
+ 332: "Drawer",
+ 333: "Treadmill",
+ 334: "Picnic basket",
+ 335: "Dice",
+ 336: "Cabbage",
+ 337: "Football helmet",
+ 338: "Pig",
+ 339: "Person",
+ 340: "Shorts",
+ 341: "Gondola",
+ 342: "Honeycomb",
+ 343: "Doughnut",
+ 344: "Chest of drawers",
+ 345: "Land vehicle",
+ 346: "Bat",
+ 347: "Monkey",
+ 348: "Dagger",
+ 349: "Tableware",
+ 350: "Human foot",
+ 351: "Mug",
+ 352: "Alarm clock",
+ 353: "Pressure cooker",
+ 354: "Human hand",
+ 355: "Tortoise",
+ 356: "Baseball glove",
+ 357: "Sword",
+ 358: "Pear",
+ 359: "Miniskirt",
+ 360: "Traffic sign",
+ 361: "Girl",
+ 362: "Roller skates",
+ 363: "Dinosaur",
+ 364: "Porch",
+ 365: "Human beard",
+ 366: "Submarine sandwich",
+ 367: "Screwdriver",
+ 368: "Strawberry",
+ 369: "Wine glass",
+ 370: "Seafood",
+ 371: "Racket",
+ 372: "Wheel",
+ 373: "Sea lion",
+ 374: "Toy",
+ 375: "Tea",
+ 376: "Tennis ball",
+ 377: "Waste container",
+ 378: "Mule",
+ 379: "Cricket ball",
+ 380: "Pineapple",
+ 381: "Coconut",
+ 382: "Doll",
+ 383: "Coffee table",
+ 384: "Snowman",
+ 385: "Lavender",
+ 386: "Shrimp",
+ 387: "Maple",
+ 388: "Cowboy hat",
+ 389: "Goggles",
+ 390: "Rugby ball",
+ 391: "Caterpillar",
+ 392: "Poster",
+ 393: "Rocket",
+ 394: "Organ",
+ 395: "Saxophone",
+ 396: "Traffic light",
+ 397: "Cocktail",
+ 398: "Plastic bag",
+ 399: "Squash",
+ 400: "Mushroom",
+ 401: "Hamburger",
+ 402: "Light switch",
+ 403: "Parachute",
+ 404: "Teddy bear",
+ 405: "Winter melon",
+ 406: "Deer",
+ 407: "Musical keyboard",
+ 408: "Plumbing fixture",
+ 409: "Scoreboard",
+ 410: "Baseball bat",
+ 411: "Envelope",
+ 412: "Adhesive tape",
+ 413: "Briefcase",
+ 414: "Paddle",
+ 415: "Bow and arrow",
+ 416: "Telephone",
+ 417: "Sheep",
+ 418: "Jacket",
+ 419: "Boy",
+ 420: "Pizza",
+ 421: "Otter",
+ 422: "Office supplies",
+ 423: "Couch",
+ 424: "Cello",
+ 425: "Bull",
+ 426: "Camel",
+ 427: "Ball",
+ 428: "Duck",
+ 429: "Whale",
+ 430: "Shirt",
+ 431: "Tank",
+ 432: "Motorcycle",
+ 433: "Accordion",
+ 434: "Owl",
+ 435: "Porcupine",
+ 436: "Sun hat",
+ 437: "Nail",
+ 438: "Scissors",
+ 439: "Swan",
+ 440: "Lamp",
+ 441: "Crown",
+ 442: "Piano",
+ 443: "Sculpture",
+ 444: "Cheetah",
+ 445: "Oboe",
+ 446: "Tin can",
+ 447: "Mango",
+ 448: "Tripod",
+ 449: "Oven",
+ 450: "Mouse",
+ 451: "Barge",
+ 452: "Coffee",
+ 453: "Snowboard",
+ 454: "Common fig",
+ 455: "Salad",
+ 456: "Marine invertebrates",
+ 457: "Umbrella",
+ 458: "Kangaroo",
+ 459: "Human arm",
+ 460: "Measuring cup",
+ 461: "Snail",
+ 462: "Loveseat",
+ 463: "Suit",
+ 464: "Teapot",
+ 465: "Bottle",
+ 466: "Alpaca",
+ 467: "Kettle",
+ 468: "Trousers",
+ 469: "Popcorn",
+ 470: "Centipede",
+ 471: "Spider",
+ 472: "Sparrow",
+ 473: "Plate",
+ 474: "Bagel",
+ 475: "Personal care",
+ 476: "Apple",
+ 477: "Brassiere",
+ 478: "Bathroom cabinet",
+ 479: "studio couch",
+ 480: "Computer keyboard",
+ 481: "Table tennis racket",
+ 482: "Sushi",
+ 483: "Cabinetry",
+ 484: "Street light",
+ 485: "Towel",
+ 486: "Nightstand",
+ 487: "Rabbit",
+ 488: "Dolphin",
+ 489: "Dog",
+ 490: "Jug",
+ 491: "Wok",
+ 492: "Fire hydrant",
+ 493: "Human eye",
+ 494: "Skyscraper",
+ 495: "Backpack",
+ 496: "Potato",
+ 497: "Paper towel",
+ 498: "Lifejacket",
+ 499: "Bicycle wheel",
+ 500: "Toilet",
+ }
+
+ return clsid2catid, catid2name
+
+
+def _visdrone_category():
+ clsid2catid = {i: i for i in range(10)}
+
+ catid2name = {
+ 0: 'pedestrian',
+ 1: 'people',
+ 2: 'bicycle',
+ 3: 'car',
+ 4: 'van',
+ 5: 'truck',
+ 6: 'tricycle',
+ 7: 'awning-tricycle',
+ 8: 'bus',
+ 9: 'motor'
+ }
+ return clsid2catid, catid2name
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/coco.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/coco.py
new file mode 100644
index 000000000..0efc9ae0e
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/coco.py
@@ -0,0 +1,251 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import numpy as np
+from ppdet.core.workspace import register, serializable
+from .dataset import DetDataset
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+
+@register
+@serializable
+class COCODataSet(DetDataset):
+ """
+ Load dataset with COCO format.
+
+ Args:
+ dataset_dir (str): root directory for dataset.
+ image_dir (str): directory for images.
+ anno_path (str): coco annotation file path.
+ data_fields (list): key name of data dictionary, at least have 'image'.
+ sample_num (int): number of samples to load, -1 means all.
+ load_crowd (bool): whether to load crowded ground-truth.
+ False as default
+ allow_empty (bool): whether to load empty entry. False as default
+        empty_ratio (float): the ratio of empty records to total
+            records; if empty_ratio is outside [0., 1.), do not sample the
+            records and use all the empty entries. 1. as default
+ """
+
+ def __init__(self,
+ dataset_dir=None,
+ image_dir=None,
+ anno_path=None,
+ data_fields=['image'],
+ sample_num=-1,
+ load_crowd=False,
+ allow_empty=False,
+ empty_ratio=1.):
+ super(COCODataSet, self).__init__(dataset_dir, image_dir, anno_path,
+ data_fields, sample_num)
+ self.load_image_only = False
+ self.load_semantic = False
+ self.load_crowd = load_crowd
+ self.allow_empty = allow_empty
+ self.empty_ratio = empty_ratio
+
+ def _sample_empty(self, records, num):
+        # if empty_ratio is outside [0., 1.), do not sample the records
+ if self.empty_ratio < 0. or self.empty_ratio >= 1.:
+ return records
+ import random
+ sample_num = min(
+ int(num * self.empty_ratio / (1 - self.empty_ratio)), len(records))
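+        # Worked example (illustrative): with num=900 non-empty records
+        # and empty_ratio=0.1, sample_num = min(int(900 * 0.1 / 0.9),
+        # len(records)) = 100, so empties end up as ~10% of the final set.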
+ records = random.sample(records, sample_num)
+ return records
+
+ def parse_dataset(self):
+ anno_path = os.path.join(self.dataset_dir, self.anno_path)
+ image_dir = os.path.join(self.dataset_dir, self.image_dir)
+
+ assert anno_path.endswith('.json'), \
+ 'invalid coco annotation file: ' + anno_path
+ from pycocotools.coco import COCO
+ coco = COCO(anno_path)
+ img_ids = coco.getImgIds()
+ img_ids.sort()
+ cat_ids = coco.getCatIds()
+ records = []
+ empty_records = []
+ ct = 0
+
+ self.catid2clsid = dict({catid: i for i, catid in enumerate(cat_ids)})
+ self.cname2cid = dict({
+ coco.loadCats(catid)[0]['name']: clsid
+ for catid, clsid in self.catid2clsid.items()
+ })
+
+ if 'annotations' not in coco.dataset:
+ self.load_image_only = True
+            logger.warning('Annotation file: {} does not contain ground truth, '
+                           'loading image information only.'.format(anno_path))
+
+ for img_id in img_ids:
+ img_anno = coco.loadImgs([img_id])[0]
+ im_fname = img_anno['file_name']
+ im_w = float(img_anno['width'])
+ im_h = float(img_anno['height'])
+
+ im_path = os.path.join(image_dir,
+ im_fname) if image_dir else im_fname
+ im_path = im_path.rstrip()
+ is_empty = False
+
+ if not os.path.exists(im_path):
+ logger.warning('Illegal image file: {}, and it will be '
+ 'ignored'.format(im_path))
+ continue
+
+ if im_w < 0 or im_h < 0:
+ logger.warning('Illegal width: {} or height: {} in annotation, '
+ 'and im_id: {} will be ignored'.format(
+ im_w, im_h, img_id))
+ continue
+
+ coco_rec = {
+ 'im_file': im_path,
+ 'im_id': np.array([img_id]),
+ 'h': im_h,
+ 'w': im_w,
+ } if 'image' in self.data_fields else {}
+
+ if not self.load_image_only:
+ ins_anno_ids = coco.getAnnIds(
+ imgIds=[img_id], iscrowd=None if self.load_crowd else False)
+ instances = coco.loadAnns(ins_anno_ids)
+
+ bboxes = []
+ is_rbox_anno = False
+ for inst in instances:
+ # check gt bbox
+ if inst.get('ignore', False):
+ continue
+ if 'bbox' not in inst.keys():
+ continue
+ else:
+ if not any(np.array(inst['bbox'])):
+ continue
+
+ # read rbox anno or not
+                    is_rbox_anno = len(inst['bbox']) == 5
+ if is_rbox_anno:
+ xc, yc, box_w, box_h, angle = inst['bbox']
+ x1 = xc - box_w / 2.0
+ y1 = yc - box_h / 2.0
+ x2 = x1 + box_w
+ y2 = y1 + box_h
+ else:
+ x1, y1, box_w, box_h = inst['bbox']
+ x2 = x1 + box_w
+ y2 = y1 + box_h
+ eps = 1e-5
+ if inst['area'] > 0 and x2 - x1 > eps and y2 - y1 > eps:
+ inst['clean_bbox'] = [
+ round(float(x), 3) for x in [x1, y1, x2, y2]
+ ]
+ if is_rbox_anno:
+ inst['clean_rbox'] = [xc, yc, box_w, box_h, angle]
+ bboxes.append(inst)
+ else:
+ logger.warning(
+ 'Found an invalid bbox in annotations: im_id: {}, '
+ 'area: {} x1: {}, y1: {}, x2: {}, y2: {}.'.format(
+ img_id, float(inst['area']), x1, y1, x2, y2))
+
+ num_bbox = len(bboxes)
+ if num_bbox <= 0 and not self.allow_empty:
+ continue
+ elif num_bbox <= 0:
+ is_empty = True
+
+ gt_bbox = np.zeros((num_bbox, 4), dtype=np.float32)
+ if is_rbox_anno:
+ gt_rbox = np.zeros((num_bbox, 5), dtype=np.float32)
+ gt_theta = np.zeros((num_bbox, 1), dtype=np.int32)
+ gt_class = np.zeros((num_bbox, 1), dtype=np.int32)
+ is_crowd = np.zeros((num_bbox, 1), dtype=np.int32)
+ gt_poly = [None] * num_bbox
+
+ has_segmentation = False
+ for i, box in enumerate(bboxes):
+ catid = box['category_id']
+ gt_class[i][0] = self.catid2clsid[catid]
+ gt_bbox[i, :] = box['clean_bbox']
+ # xc, yc, w, h, theta
+ if is_rbox_anno:
+ gt_rbox[i, :] = box['clean_rbox']
+ is_crowd[i][0] = box['iscrowd']
+ # check RLE format
+ if 'segmentation' in box and box['iscrowd'] == 1:
+ gt_poly[i] = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
+ elif 'segmentation' in box and box['segmentation']:
+ if not np.array(box['segmentation']
+ ).size > 0 and not self.allow_empty:
+                        bboxes.pop(i)
+                        gt_poly.pop(i)
+                        # np.delete returns a new array rather than
+                        # mutating in place, so assign the result back
+                        is_crowd = np.delete(is_crowd, i, axis=0)
+                        gt_class = np.delete(gt_class, i, axis=0)
+                        gt_bbox = np.delete(gt_bbox, i, axis=0)
+ else:
+ gt_poly[i] = box['segmentation']
+ has_segmentation = True
+
+ if has_segmentation and not any(
+ gt_poly) and not self.allow_empty:
+ continue
+
+ if is_rbox_anno:
+ gt_rec = {
+ 'is_crowd': is_crowd,
+ 'gt_class': gt_class,
+ 'gt_bbox': gt_bbox,
+ 'gt_rbox': gt_rbox,
+ 'gt_poly': gt_poly,
+ }
+ else:
+ gt_rec = {
+ 'is_crowd': is_crowd,
+ 'gt_class': gt_class,
+ 'gt_bbox': gt_bbox,
+ 'gt_poly': gt_poly,
+ }
+
+ for k, v in gt_rec.items():
+ if k in self.data_fields:
+ coco_rec[k] = v
+
+ # TODO: remove load_semantic
+ if self.load_semantic and 'semantic' in self.data_fields:
+ seg_path = os.path.join(self.dataset_dir, 'stuffthingmaps',
+ 'train2017', im_fname[:-3] + 'png')
+ coco_rec.update({'semantic': seg_path})
+
+ logger.debug('Load file: {}, im_id: {}, h: {}, w: {}.'.format(
+ im_path, img_id, im_h, im_w))
+ if is_empty:
+ empty_records.append(coco_rec)
+ else:
+ records.append(coco_rec)
+ ct += 1
+ if self.sample_num > 0 and ct >= self.sample_num:
+ break
+        assert ct > 0, 'no coco record found in %s' % (anno_path)
+ logger.debug('{} samples in file {}'.format(ct, anno_path))
+ if self.allow_empty and len(empty_records) > 0:
+ empty_records = self._sample_empty(empty_records, len(records))
+ records += empty_records
+ self.roidbs = records
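+
+
+# Minimal usage sketch (illustrative; the paths below are hypothetical
+# and must point at a local COCO-format dataset):
+#
+#     dataset = COCODataSet(
+#         dataset_dir='dataset/coco',
+#         image_dir='train2017',
+#         anno_path='annotations/instances_train2017.json',
+#         data_fields=['image', 'gt_bbox', 'gt_class', 'is_crowd'])
+#     dataset.parse_dataset()
+#     print(len(dataset.roidbs))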
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/dataset.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/dataset.py
new file mode 100644
index 000000000..1bef548e6
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/dataset.py
@@ -0,0 +1,197 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import numpy as np
+
+try:
+ from collections.abc import Sequence
+except Exception:
+ from collections import Sequence
+from paddle.io import Dataset
+from ppdet.core.workspace import register, serializable
+from ppdet.utils.download import get_dataset_path
+import copy
+
+
+@serializable
+class DetDataset(Dataset):
+ """
+ Load detection dataset.
+
+ Args:
+ dataset_dir (str): root directory for dataset.
+ image_dir (str): directory for images.
+ anno_path (str): annotation file path.
+ data_fields (list): key name of data dictionary, at least have 'image'.
+ sample_num (int): number of samples to load, -1 means all.
+ use_default_label (bool): whether to load default label list.
+ """
+
+ def __init__(self,
+ dataset_dir=None,
+ image_dir=None,
+ anno_path=None,
+ data_fields=['image'],
+ sample_num=-1,
+ use_default_label=None,
+ **kwargs):
+ super(DetDataset, self).__init__()
+ self.dataset_dir = dataset_dir if dataset_dir is not None else ''
+ self.anno_path = anno_path
+ self.image_dir = image_dir if image_dir is not None else ''
+ self.data_fields = data_fields
+ self.sample_num = sample_num
+ self.use_default_label = use_default_label
+        self._epoch = 0
+        self._curr_iter = 0
+        # defaults for the augmentation-epoch switches used in
+        # __getitem__; normally overridden later via set_kwargs()
+        self.mixup_epoch = -1
+        self.cutmix_epoch = -1
+        self.mosaic_epoch = -1
+
+ def __len__(self, ):
+ return len(self.roidbs)
+
+ def __getitem__(self, idx):
+ # data batch
+ roidb = copy.deepcopy(self.roidbs[idx])
+ if self.mixup_epoch == 0 or self._epoch < self.mixup_epoch:
+ n = len(self.roidbs)
+ idx = np.random.randint(n)
+ roidb = [roidb, copy.deepcopy(self.roidbs[idx])]
+ elif self.cutmix_epoch == 0 or self._epoch < self.cutmix_epoch:
+ n = len(self.roidbs)
+ idx = np.random.randint(n)
+ roidb = [roidb, copy.deepcopy(self.roidbs[idx])]
+ elif self.mosaic_epoch == 0 or self._epoch < self.mosaic_epoch:
+ n = len(self.roidbs)
+ roidb = [roidb, ] + [
+ copy.deepcopy(self.roidbs[np.random.randint(n)])
+ for _ in range(3)
+ ]
+ if isinstance(roidb, Sequence):
+ for r in roidb:
+ r['curr_iter'] = self._curr_iter
+ else:
+ roidb['curr_iter'] = self._curr_iter
+ self._curr_iter += 1
+
+ return self.transform(roidb)
+
+ def check_or_download_dataset(self):
+ self.dataset_dir = get_dataset_path(self.dataset_dir, self.anno_path,
+ self.image_dir)
+
+ def set_kwargs(self, **kwargs):
+ self.mixup_epoch = kwargs.get('mixup_epoch', -1)
+ self.cutmix_epoch = kwargs.get('cutmix_epoch', -1)
+ self.mosaic_epoch = kwargs.get('mosaic_epoch', -1)
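+        # Semantics (mirrors __getitem__): 0 keeps the augmentation on
+        # for the whole run, a positive value keeps it on while
+        # self._epoch is below that value, and -1 disables it.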
+
+ def set_transform(self, transform):
+ self.transform = transform
+
+ def set_epoch(self, epoch_id):
+ self._epoch = epoch_id
+
+ def parse_dataset(self, ):
+ raise NotImplementedError(
+ "Need to implement parse_dataset method of Dataset")
+
+ def get_anno(self):
+ if self.anno_path is None:
+ return
+ return os.path.join(self.dataset_dir, self.anno_path)
+
+
+def _is_valid_file(f, extensions=('.jpg', '.jpeg', '.png', '.bmp')):
+ return f.lower().endswith(extensions)
+
+
+def _make_dataset(dir):
+ dir = os.path.expanduser(dir)
+ if not os.path.isdir(dir):
+        raise ValueError('{} should be a dir'.format(dir))
+ images = []
+ for root, _, fnames in sorted(os.walk(dir, followlinks=True)):
+ for fname in sorted(fnames):
+ path = os.path.join(root, fname)
+ if _is_valid_file(path):
+ images.append(path)
+ return images
+
+
+@register
+@serializable
+class ImageFolder(DetDataset):
+ def __init__(self,
+ dataset_dir=None,
+ image_dir=None,
+ anno_path=None,
+ sample_num=-1,
+ use_default_label=None,
+ **kwargs):
+ super(ImageFolder, self).__init__(
+ dataset_dir,
+ image_dir,
+ anno_path,
+ sample_num=sample_num,
+ use_default_label=use_default_label)
+ self._imid2path = {}
+ self.roidbs = None
+ self.sample_num = sample_num
+
+ def check_or_download_dataset(self):
+ if self.dataset_dir:
+ # NOTE: ImageFolder is only used for prediction, in
+ # infer mode, image_dir is set by set_images
+ # so we only check anno_path here
+ self.dataset_dir = get_dataset_path(self.dataset_dir,
+ self.anno_path, None)
+
+ def parse_dataset(self, ):
+ if not self.roidbs:
+ self.roidbs = self._load_images()
+
+ def _parse(self):
+ image_dir = self.image_dir
+ if not isinstance(image_dir, Sequence):
+ image_dir = [image_dir]
+ images = []
+ for im_dir in image_dir:
+ if os.path.isdir(im_dir):
+ im_dir = os.path.join(self.dataset_dir, im_dir)
+ images.extend(_make_dataset(im_dir))
+ elif os.path.isfile(im_dir) and _is_valid_file(im_dir):
+ images.append(im_dir)
+ return images
+
+ def _load_images(self):
+ images = self._parse()
+ ct = 0
+ records = []
+ for image in images:
+ assert image != '' and os.path.isfile(image), \
+ "Image {} not found".format(image)
+ if self.sample_num > 0 and ct >= self.sample_num:
+ break
+ rec = {'im_id': np.array([ct]), 'im_file': image}
+ self._imid2path[ct] = image
+ ct += 1
+ records.append(rec)
+ assert len(records) > 0, "No image file found"
+ return records
+
+ def get_imid2path(self):
+ return self._imid2path
+
+ def set_images(self, images):
+ self.image_dir = images
+ self.roidbs = self._load_images()
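+
+
+# Minimal inference-time sketch (illustrative; the image paths are
+# hypothetical):
+#
+#     folder = ImageFolder()
+#     folder.set_images(['demo/000001.jpg', 'demo/000002.jpg'])
+#     print(folder.get_imid2path())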
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/keypoint_coco.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/keypoint_coco.py
new file mode 100644
index 000000000..fdea57ada
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/keypoint_coco.py
@@ -0,0 +1,674 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import cv2
+import numpy as np
+import json
+import copy
+import pycocotools
+from pycocotools.coco import COCO
+from .dataset import DetDataset
+from ppdet.core.workspace import register, serializable
+
+
+@serializable
+class KeypointBottomUpBaseDataset(DetDataset):
+ """Base class for bottom-up datasets. Adapted from
+ https://github.com/open-mmlab/mmpose
+
+ All datasets should subclass it.
+ All subclasses should overwrite:
+ Methods:`_get_imganno`
+
+ Args:
+ dataset_dir (str): Root path to the dataset.
+ anno_path (str): Relative path to the annotation file.
+ image_dir (str): Path to a directory where images are held.
+ Default: None.
+ num_joints (int): keypoint numbers
+ transform (composed(operators)): A sequence of data transforms.
+ shard (list): [rank, worldsize], the distributed env params
+ test_mode (bool): Store True when building test or
+ validation dataset. Default: False.
+ """
+
+ def __init__(self,
+ dataset_dir,
+ image_dir,
+ anno_path,
+ num_joints,
+ transform=[],
+ shard=[0, 1],
+ test_mode=False):
+ super().__init__(dataset_dir, image_dir, anno_path)
+ self.image_info = {}
+ self.ann_info = {}
+
+ self.img_prefix = os.path.join(dataset_dir, image_dir)
+ self.transform = transform
+ self.test_mode = test_mode
+
+ self.ann_info['num_joints'] = num_joints
+ self.img_ids = []
+
+ def parse_dataset(self):
+ pass
+
+ def __len__(self):
+ """Get dataset length."""
+ return len(self.img_ids)
+
+ def _get_imganno(self, idx):
+ """Get anno for a single image."""
+ raise NotImplementedError
+
+ def __getitem__(self, idx):
+ """Prepare image for training given the index."""
+ records = copy.deepcopy(self._get_imganno(idx))
+ records['image'] = cv2.imread(records['image_file'])
+ records['image'] = cv2.cvtColor(records['image'], cv2.COLOR_BGR2RGB)
+ records['mask'] = (records['mask'] + 0).astype('uint8')
+ records = self.transform(records)
+ return records
+
+
+@register
+@serializable
+class KeypointBottomUpCocoDataset(KeypointBottomUpBaseDataset):
+ """COCO dataset for bottom-up pose estimation. Adapted from
+ https://github.com/open-mmlab/mmpose
+
+    The dataset loads raw features and applies the specified transforms
+ to return a dict containing the image tensors and other information.
+
+ COCO keypoint indexes::
+
+ 0: 'nose',
+ 1: 'left_eye',
+ 2: 'right_eye',
+ 3: 'left_ear',
+ 4: 'right_ear',
+ 5: 'left_shoulder',
+ 6: 'right_shoulder',
+ 7: 'left_elbow',
+ 8: 'right_elbow',
+ 9: 'left_wrist',
+ 10: 'right_wrist',
+ 11: 'left_hip',
+ 12: 'right_hip',
+ 13: 'left_knee',
+ 14: 'right_knee',
+ 15: 'left_ankle',
+ 16: 'right_ankle'
+
+ Args:
+ dataset_dir (str): Root path to the dataset.
+ anno_path (str): Relative path to the annotation file.
+ image_dir (str): Path to a directory where images are held.
+ Default: None.
+ num_joints (int): keypoint numbers
+ transform (composed(operators)): A sequence of data transforms.
+ shard (list): [rank, worldsize], the distributed env params
+ test_mode (bool): Store True when building test or
+ validation dataset. Default: False.
+ """
+
+ def __init__(self,
+ dataset_dir,
+ image_dir,
+ anno_path,
+ num_joints,
+ transform=[],
+ shard=[0, 1],
+ test_mode=False):
+ super().__init__(dataset_dir, image_dir, anno_path, num_joints,
+ transform, shard, test_mode)
+
+ self.ann_file = os.path.join(dataset_dir, anno_path)
+ self.shard = shard
+ self.test_mode = test_mode
+
+ def parse_dataset(self):
+ self.coco = COCO(self.ann_file)
+
+ self.img_ids = self.coco.getImgIds()
+ if not self.test_mode:
+ self.img_ids = [
+ img_id for img_id in self.img_ids
+ if len(self.coco.getAnnIds(
+ imgIds=img_id, iscrowd=None)) > 0
+ ]
+ blocknum = int(len(self.img_ids) / self.shard[1])
+ self.img_ids = self.img_ids[(blocknum * self.shard[0]):(blocknum * (
+ self.shard[0] + 1))]
+ self.num_images = len(self.img_ids)
+ self.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs)
+ self.dataset_name = 'coco'
+
+ cat_ids = self.coco.getCatIds()
+ self.catid2clsid = dict({catid: i for i, catid in enumerate(cat_ids)})
+ print('=> num_images: {}'.format(self.num_images))
+
+ @staticmethod
+ def _get_mapping_id_name(imgs):
+ """
+ Args:
+ imgs (dict): dict of image info.
+
+ Returns:
+ tuple: Image name & id mapping dicts.
+
+ - id2name (dict): Mapping image id to name.
+ - name2id (dict): Mapping image name to id.
+ """
+ id2name = {}
+ name2id = {}
+ for image_id, image in imgs.items():
+ file_name = image['file_name']
+ id2name[image_id] = file_name
+ name2id[file_name] = image_id
+
+ return id2name, name2id
+
+ def _get_imganno(self, idx):
+ """Get anno for a single image.
+
+ Args:
+ idx (int): image idx
+
+ Returns:
+ dict: info for model training
+ """
+ coco = self.coco
+ img_id = self.img_ids[idx]
+ ann_ids = coco.getAnnIds(imgIds=img_id)
+ anno = coco.loadAnns(ann_ids)
+
+ mask = self._get_mask(anno, idx)
+ anno = [
+ obj for obj in anno
+ if obj['iscrowd'] == 0 or obj['num_keypoints'] > 0
+ ]
+
+ joints, orgsize = self._get_joints(anno, idx)
+
+ db_rec = {}
+ db_rec['im_id'] = img_id
+ db_rec['image_file'] = os.path.join(self.img_prefix,
+ self.id2name[img_id])
+ db_rec['mask'] = mask
+ db_rec['joints'] = joints
+ db_rec['im_shape'] = orgsize
+
+ return db_rec
+
+ def _get_joints(self, anno, idx):
+ """Get joints for all people in an image."""
+ num_people = len(anno)
+
+ joints = np.zeros(
+ (num_people, self.ann_info['num_joints'], 3), dtype=np.float32)
+
+ for i, obj in enumerate(anno):
+ joints[i, :self.ann_info['num_joints'], :3] = \
+ np.array(obj['keypoints']).reshape([-1, 3])
+
+ img_info = self.coco.loadImgs(self.img_ids[idx])[0]
+ joints[..., 0] /= img_info['width']
+ joints[..., 1] /= img_info['height']
+ orgsize = np.array([img_info['height'], img_info['width']])
+
+ return joints, orgsize
+
+ def _get_mask(self, anno, idx):
+ """Get ignore masks to mask out losses."""
+ coco = self.coco
+ img_info = coco.loadImgs(self.img_ids[idx])[0]
+
+ m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)
+
+ for obj in anno:
+ if 'segmentation' in obj:
+ if obj['iscrowd']:
+ rle = pycocotools.mask.frPyObjects(obj['segmentation'],
+ img_info['height'],
+ img_info['width'])
+ m += pycocotools.mask.decode(rle)
+ elif obj['num_keypoints'] == 0:
+ rles = pycocotools.mask.frPyObjects(obj['segmentation'],
+ img_info['height'],
+ img_info['width'])
+ for rle in rles:
+ m += pycocotools.mask.decode(rle)
+
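+        # m accumulates the ignore-region masks, so `m < 0.5` is True
+        # exactly where the loss should still be applied.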
+ return m < 0.5
+
+
+@register
+@serializable
+class KeypointBottomUpCrowdPoseDataset(KeypointBottomUpCocoDataset):
+ """CrowdPose dataset for bottom-up pose estimation. Adapted from
+ https://github.com/open-mmlab/mmpose
+
+    The dataset loads raw features and applies the specified transforms
+ to return a dict containing the image tensors and other information.
+
+ CrowdPose keypoint indexes::
+
+ 0: 'left_shoulder',
+ 1: 'right_shoulder',
+ 2: 'left_elbow',
+ 3: 'right_elbow',
+ 4: 'left_wrist',
+ 5: 'right_wrist',
+ 6: 'left_hip',
+ 7: 'right_hip',
+ 8: 'left_knee',
+ 9: 'right_knee',
+ 10: 'left_ankle',
+ 11: 'right_ankle',
+ 12: 'top_head',
+ 13: 'neck'
+
+ Args:
+ dataset_dir (str): Root path to the dataset.
+ anno_path (str): Relative path to the annotation file.
+ image_dir (str): Path to a directory where images are held.
+ Default: None.
+ num_joints (int): keypoint numbers
+ transform (composed(operators)): A sequence of data transforms.
+ shard (list): [rank, worldsize], the distributed env params
+ test_mode (bool): Store True when building test or
+ validation dataset. Default: False.
+ """
+
+ def __init__(self,
+ dataset_dir,
+ image_dir,
+ anno_path,
+ num_joints,
+ transform=[],
+ shard=[0, 1],
+ test_mode=False):
+ super().__init__(dataset_dir, image_dir, anno_path, num_joints,
+ transform, shard, test_mode)
+
+ self.ann_file = os.path.join(dataset_dir, anno_path)
+ self.shard = shard
+ self.test_mode = test_mode
+
+ def parse_dataset(self):
+ self.coco = COCO(self.ann_file)
+
+ self.img_ids = self.coco.getImgIds()
+ if not self.test_mode:
+ self.img_ids = [
+ img_id for img_id in self.img_ids
+ if len(self.coco.getAnnIds(
+ imgIds=img_id, iscrowd=None)) > 0
+ ]
+ blocknum = int(len(self.img_ids) / self.shard[1])
+ self.img_ids = self.img_ids[(blocknum * self.shard[0]):(blocknum * (
+ self.shard[0] + 1))]
+ self.num_images = len(self.img_ids)
+ self.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs)
+
+ self.dataset_name = 'crowdpose'
+ print('=> num_images: {}'.format(self.num_images))
+
+
+@serializable
+class KeypointTopDownBaseDataset(DetDataset):
+ """Base class for top_down datasets.
+
+ All datasets should subclass it.
+ All subclasses should overwrite:
+ Methods:`_get_db`
+
+ Args:
+ dataset_dir (str): Root path to the dataset.
+ image_dir (str): Path to a directory where images are held.
+ anno_path (str): Relative path to the annotation file.
+ num_joints (int): keypoint numbers
+ transform (composed(operators)): A sequence of data transforms.
+ """
+
+ def __init__(self,
+ dataset_dir,
+ image_dir,
+ anno_path,
+ num_joints,
+ transform=[]):
+ super().__init__(dataset_dir, image_dir, anno_path)
+ self.image_info = {}
+ self.ann_info = {}
+
+ self.img_prefix = os.path.join(dataset_dir, image_dir)
+ self.transform = transform
+
+ self.ann_info['num_joints'] = num_joints
+ self.db = []
+
+ def __len__(self):
+ """Get dataset length."""
+ return len(self.db)
+
+ def _get_db(self):
+ """Get a sample"""
+ raise NotImplementedError
+
+ def __getitem__(self, idx):
+ """Prepare sample for training given the index."""
+ records = copy.deepcopy(self.db[idx])
+ records['image'] = cv2.imread(records['image_file'], cv2.IMREAD_COLOR |
+ cv2.IMREAD_IGNORE_ORIENTATION)
+ records['image'] = cv2.cvtColor(records['image'], cv2.COLOR_BGR2RGB)
+ records['score'] = records['score'] if 'score' in records else 1
+ records = self.transform(records)
+ return records
+
+
+@register
+@serializable
+class KeypointTopDownCocoDataset(KeypointTopDownBaseDataset):
+ """COCO dataset for top-down pose estimation. Adapted from
+ https://github.com/leoxiaobin/deep-high-resolution-net.pytorch
+ Copyright (c) Microsoft, under the MIT License.
+
+    The dataset loads raw features and applies the specified transforms
+ to return a dict containing the image tensors and other information.
+
+ COCO keypoint indexes:
+
+ 0: 'nose',
+ 1: 'left_eye',
+ 2: 'right_eye',
+ 3: 'left_ear',
+ 4: 'right_ear',
+ 5: 'left_shoulder',
+ 6: 'right_shoulder',
+ 7: 'left_elbow',
+ 8: 'right_elbow',
+ 9: 'left_wrist',
+ 10: 'right_wrist',
+ 11: 'left_hip',
+ 12: 'right_hip',
+ 13: 'left_knee',
+ 14: 'right_knee',
+ 15: 'left_ankle',
+ 16: 'right_ankle'
+
+ Args:
+ dataset_dir (str): Root path to the dataset.
+ image_dir (str): Path to a directory where images are held.
+ anno_path (str): Relative path to the annotation file.
+ num_joints (int): Keypoint numbers
+ trainsize (list):[w, h] Image target size
+ transform (composed(operators)): A sequence of data transforms.
+ bbox_file (str): Path to a detection bbox file
+ Default: None.
+ use_gt_bbox (bool): Whether to use ground truth bbox
+ Default: True.
+ pixel_std (int): The pixel std of the scale
+ Default: 200.
+ image_thre (float): The threshold to filter the detection box
+ Default: 0.0.
+ """
+
+ def __init__(self,
+ dataset_dir,
+ image_dir,
+ anno_path,
+ num_joints,
+ trainsize,
+ transform=[],
+ bbox_file=None,
+ use_gt_bbox=True,
+ pixel_std=200,
+ image_thre=0.0):
+ super().__init__(dataset_dir, image_dir, anno_path, num_joints,
+ transform)
+
+ self.bbox_file = bbox_file
+ self.use_gt_bbox = use_gt_bbox
+ self.trainsize = trainsize
+ self.pixel_std = pixel_std
+ self.image_thre = image_thre
+ self.dataset_name = 'coco'
+
+ def parse_dataset(self):
+ if self.use_gt_bbox:
+ self.db = self._load_coco_keypoint_annotations()
+ else:
+ self.db = self._load_coco_person_detection_results()
+
+ def _load_coco_keypoint_annotations(self):
+ coco = COCO(self.get_anno())
+ img_ids = coco.getImgIds()
+ gt_db = []
+ for index in img_ids:
+ im_ann = coco.loadImgs(index)[0]
+ width = im_ann['width']
+ height = im_ann['height']
+ file_name = im_ann['file_name']
+ im_id = int(im_ann["id"])
+
+ annIds = coco.getAnnIds(imgIds=index, iscrowd=False)
+ objs = coco.loadAnns(annIds)
+
+ valid_objs = []
+ for obj in objs:
+ x, y, w, h = obj['bbox']
+ x1 = np.max((0, x))
+ y1 = np.max((0, y))
+ x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
+ y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
+ if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
+ obj['clean_bbox'] = [x1, y1, x2 - x1, y2 - y1]
+ valid_objs.append(obj)
+ objs = valid_objs
+
+ rec = []
+ for obj in objs:
+ if max(obj['keypoints']) == 0:
+ continue
+
+                joints = np.zeros(
+                    (self.ann_info['num_joints'], 3), dtype=np.float32)
+                joints_vis = np.zeros(
+                    (self.ann_info['num_joints'], 3), dtype=np.float32)
+ for ipt in range(self.ann_info['num_joints']):
+ joints[ipt, 0] = obj['keypoints'][ipt * 3 + 0]
+ joints[ipt, 1] = obj['keypoints'][ipt * 3 + 1]
+ joints[ipt, 2] = 0
+ t_vis = obj['keypoints'][ipt * 3 + 2]
+ if t_vis > 1:
+ t_vis = 1
+ joints_vis[ipt, 0] = t_vis
+ joints_vis[ipt, 1] = t_vis
+ joints_vis[ipt, 2] = 0
+
+ center, scale = self._box2cs(obj['clean_bbox'][:4])
+ rec.append({
+ 'image_file': os.path.join(self.img_prefix, file_name),
+ 'center': center,
+ 'scale': scale,
+ 'joints': joints,
+ 'joints_vis': joints_vis,
+ 'im_id': im_id,
+ })
+ gt_db.extend(rec)
+
+ return gt_db
+
+ def _box2cs(self, box):
+ x, y, w, h = box[:4]
+ center = np.zeros((2), dtype=np.float32)
+ center[0] = x + w * 0.5
+ center[1] = y + h * 0.5
+ aspect_ratio = self.trainsize[0] * 1.0 / self.trainsize[1]
+
+ if w > aspect_ratio * h:
+ h = w * 1.0 / aspect_ratio
+ elif w < aspect_ratio * h:
+ w = h * aspect_ratio
+ scale = np.array(
+ [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],
+ dtype=np.float32)
+ if center[0] != -1:
+ scale = scale * 1.25
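+        # Worked example (illustrative): trainsize=[192, 256] gives an
+        # aspect_ratio of 0.75, so box [0, 0, 100, 100] keeps w=100,
+        # grows h to ~133.3, and returns scale ~= [0.5, 0.667] * 1.25.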
+
+ return center, scale
+
+ def _load_coco_person_detection_results(self):
+ all_boxes = None
+ bbox_file_path = os.path.join(self.dataset_dir, self.bbox_file)
+ with open(bbox_file_path, 'r') as f:
+ all_boxes = json.load(f)
+
+ if not all_boxes:
+            print('=> Failed to load %s!' % bbox_file_path)
+ return None
+
+ kpt_db = []
+ for n_img in range(0, len(all_boxes)):
+ det_res = all_boxes[n_img]
+ if det_res['category_id'] != 1:
+ continue
+ file_name = det_res[
+ 'filename'] if 'filename' in det_res else '%012d.jpg' % det_res[
+ 'image_id']
+ img_name = os.path.join(self.img_prefix, file_name)
+ box = det_res['bbox']
+ score = det_res['score']
+ im_id = int(det_res['image_id'])
+
+ if score < self.image_thre:
+ continue
+
+ center, scale = self._box2cs(box)
+            joints = np.zeros((self.ann_info['num_joints'], 3), dtype=np.float32)
+            joints_vis = np.ones(
+                (self.ann_info['num_joints'], 3), dtype=np.float32)
+ kpt_db.append({
+ 'image_file': img_name,
+ 'im_id': im_id,
+ 'center': center,
+ 'scale': scale,
+ 'score': score,
+ 'joints': joints,
+ 'joints_vis': joints_vis,
+ })
+
+ return kpt_db
+
+
+@register
+@serializable
+class KeypointTopDownMPIIDataset(KeypointTopDownBaseDataset):
+ """MPII dataset for topdown pose estimation. Adapted from
+ https://github.com/leoxiaobin/deep-high-resolution-net.pytorch
+ Copyright (c) Microsoft, under the MIT License.
+
+    The dataset loads raw features and applies the specified transforms
+ to return a dict containing the image tensors and other information.
+
+ MPII keypoint indexes::
+
+ 0: 'right_ankle',
+ 1: 'right_knee',
+ 2: 'right_hip',
+ 3: 'left_hip',
+ 4: 'left_knee',
+ 5: 'left_ankle',
+ 6: 'pelvis',
+ 7: 'thorax',
+ 8: 'upper_neck',
+ 9: 'head_top',
+ 10: 'right_wrist',
+ 11: 'right_elbow',
+ 12: 'right_shoulder',
+ 13: 'left_shoulder',
+ 14: 'left_elbow',
+ 15: 'left_wrist',
+
+ Args:
+ dataset_dir (str): Root path to the dataset.
+ image_dir (str): Path to a directory where images are held.
+ anno_path (str): Relative path to the annotation file.
+ num_joints (int): Keypoint numbers
+ trainsize (list):[w, h] Image target size
+ transform (composed(operators)): A sequence of data transforms.
+ """
+
+ def __init__(self,
+ dataset_dir,
+ image_dir,
+ anno_path,
+ num_joints,
+ transform=[]):
+ super().__init__(dataset_dir, image_dir, anno_path, num_joints,
+ transform)
+
+ self.dataset_name = 'mpii'
+
+ def parse_dataset(self):
+ with open(self.get_anno()) as anno_file:
+ anno = json.load(anno_file)
+
+ gt_db = []
+ for a in anno:
+ image_name = a['image']
+ im_id = a['image_id'] if 'image_id' in a else int(
+ os.path.splitext(image_name)[0])
+
+            c = np.array(a['center'], dtype=np.float32)
+            s = np.array([a['scale'], a['scale']], dtype=np.float32)
+
+ # Adjust center/scale slightly to avoid cropping limbs
+ if c[0] != -1:
+ c[1] = c[1] + 15 * s[1]
+ s = s * 1.25
+ c = c - 1
+
+            joints = np.zeros((self.ann_info['num_joints'], 3), dtype=np.float32)
+            joints_vis = np.zeros(
+                (self.ann_info['num_joints'], 3), dtype=np.float32)
+ if 'joints' in a:
+ joints_ = np.array(a['joints'])
+ joints_[:, 0:2] = joints_[:, 0:2] - 1
+ joints_vis_ = np.array(a['joints_vis'])
+ assert len(joints_) == self.ann_info[
+ 'num_joints'], 'joint num diff: {} vs {}'.format(
+ len(joints_), self.ann_info['num_joints'])
+
+ joints[:, 0:2] = joints_[:, 0:2]
+ joints_vis[:, 0] = joints_vis_[:]
+ joints_vis[:, 1] = joints_vis_[:]
+
+ gt_db.append({
+ 'image_file': os.path.join(self.img_prefix, image_name),
+ 'im_id': im_id,
+ 'center': c,
+ 'scale': s,
+ 'joints': joints,
+ 'joints_vis': joints_vis
+ })
+ print("number length: {}".format(len(gt_db)))
+ self.db = gt_db
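+
+
+# Minimal usage sketch (illustrative; the paths are hypothetical):
+#
+#     dataset = KeypointTopDownCocoDataset(
+#         dataset_dir='dataset/coco',
+#         image_dir='val2017',
+#         anno_path='annotations/person_keypoints_val2017.json',
+#         num_joints=17,
+#         trainsize=[192, 256])
+#     dataset.parse_dataset()
+#     print(len(dataset))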
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/mot.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/mot.py
new file mode 100644
index 000000000..d46c02f52
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/mot.py
@@ -0,0 +1,628 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import cv2
+import glob
+import numpy as np
+from collections import OrderedDict, defaultdict
+try:
+ from collections.abc import Sequence
+except Exception:
+ from collections import Sequence
+from .dataset import DetDataset, _make_dataset, _is_valid_file
+from ppdet.core.workspace import register, serializable
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+
+@register
+@serializable
+class MOTDataSet(DetDataset):
+ """
+    Load dataset in MOT format; only single-class MOT is supported.
+
+ Args:
+ dataset_dir (str): root directory for dataset.
+        image_lists (str|list): mot data image lists for a multi-source mot dataset.
+ data_fields (list): key name of data dictionary, at least have 'image'.
+ sample_num (int): number of samples to load, -1 means all.
+
+ Notes:
+        The MOT dataset root directory follows this layout:
+ dataset/mot
+ |——————image_lists
+ | |——————caltech.train
+ | |——————caltech.val
+ | |——————mot16.train
+ | |——————mot17.train
+ | ......
+ |——————Caltech
+ |——————MOT17
+ |——————......
+
+ All the MOT datasets have the following structure:
+ Caltech
+ |——————images
+ | └——————00001.jpg
+ | |—————— ...
+ | └——————0000N.jpg
+ └——————labels_with_ids
+ └——————00001.txt
+ |—————— ...
+ └——————0000N.txt
+ or
+
+ MOT17
+ |——————images
+ | └——————train
+ | └——————test
+ └——————labels_with_ids
+ └——————train
+ """
+
+ def __init__(self,
+ dataset_dir=None,
+ image_lists=[],
+ data_fields=['image'],
+ sample_num=-1):
+ super(MOTDataSet, self).__init__(
+ dataset_dir=dataset_dir,
+ data_fields=data_fields,
+ sample_num=sample_num)
+ self.dataset_dir = dataset_dir
+ self.image_lists = image_lists
+ if isinstance(self.image_lists, str):
+ self.image_lists = [self.image_lists]
+ self.roidbs = None
+ self.cname2cid = None
+
+ def get_anno(self):
+ if self.image_lists == []:
+ return
+ # only used to get categories and metric
+        # only check the first sub-dataset; the label_list of every sub-dataset should be the same.
+ first_mot_data = self.image_lists[0].split('.')[0]
+ anno_file = os.path.join(self.dataset_dir, first_mot_data, 'label_list.txt')
+ return anno_file
+
+ def parse_dataset(self):
+ self.img_files = OrderedDict()
+ self.img_start_index = OrderedDict()
+ self.label_files = OrderedDict()
+ self.tid_num = OrderedDict()
+ self.tid_start_index = OrderedDict()
+
+ img_index = 0
+ for data_name in self.image_lists:
+ # check every data image list
+ image_lists_dir = os.path.join(self.dataset_dir, 'image_lists')
+ assert os.path.isdir(image_lists_dir), \
+ "The {} is not a directory.".format(image_lists_dir)
+
+ list_path = os.path.join(image_lists_dir, data_name)
+ assert os.path.exists(list_path), \
+ "The list path {} does not exist.".format(list_path)
+
+ # record img_files, filter out empty ones
+ with open(list_path, 'r') as file:
+ self.img_files[data_name] = file.readlines()
+ self.img_files[data_name] = [
+ os.path.join(self.dataset_dir, x.strip())
+ for x in self.img_files[data_name]
+ ]
+ self.img_files[data_name] = list(
+ filter(lambda x: len(x) > 0, self.img_files[data_name]))
+
+ self.img_start_index[data_name] = img_index
+ img_index += len(self.img_files[data_name])
+
+ # record label_files
+ self.label_files[data_name] = [
+ x.replace('images', 'labels_with_ids').replace(
+ '.png', '.txt').replace('.jpg', '.txt')
+ for x in self.img_files[data_name]
+ ]
+
+ for data_name, label_paths in self.label_files.items():
+ max_index = -1
+ for lp in label_paths:
+ lb = np.loadtxt(lp)
+ if len(lb) < 1:
+ continue
+ if len(lb.shape) < 2:
+ img_max = lb[1]
+ else:
+ img_max = np.max(lb[:, 1])
+ if img_max > max_index:
+ max_index = img_max
+ self.tid_num[data_name] = int(max_index + 1)
+
+ last_index = 0
+ for i, (k, v) in enumerate(self.tid_num.items()):
+ self.tid_start_index[k] = last_index
+ last_index += v
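+        # Example (illustrative): if caltech.train holds 500 identities
+        # and mot17.train holds 300, caltech ids keep offset 0 while
+        # mot17 ids are shifted by tid_start_index['mot17.train'] = 500,
+        # so every id lands in a single global id space.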
+
+ self.num_identities_dict = defaultdict(int)
+ self.num_identities_dict[0] = int(last_index + 1) # single class
+ self.num_imgs_each_data = [len(x) for x in self.img_files.values()]
+ self.total_imgs = sum(self.num_imgs_each_data)
+
+ logger.info('MOT dataset summary: ')
+ logger.info(self.tid_num)
+ logger.info('Total images: {}'.format(self.total_imgs))
+ logger.info('Image start index: {}'.format(self.img_start_index))
+ logger.info('Total identities: {}'.format(self.num_identities_dict[0]))
+ logger.info('Identity start index: {}'.format(self.tid_start_index))
+
+ records = []
+ cname2cid = mot_label()
+
+ for img_index in range(self.total_imgs):
+ for i, (k, v) in enumerate(self.img_start_index.items()):
+ if img_index >= v:
+ data_name = list(self.label_files.keys())[i]
+ start_index = v
+ img_file = self.img_files[data_name][img_index - start_index]
+ lbl_file = self.label_files[data_name][img_index - start_index]
+
+ if not os.path.exists(img_file):
+ logger.warning('Illegal image file: {}, and it will be ignored'.
+ format(img_file))
+ continue
+ if not os.path.isfile(lbl_file):
+ logger.warning('Illegal label file: {}, and it will be ignored'.
+ format(lbl_file))
+ continue
+
+ labels = np.loadtxt(lbl_file, dtype=np.float32).reshape(-1, 6)
+ # each row in labels (N, 6) is [gt_class, gt_identity, cx, cy, w, h]
+
+ cx, cy = labels[:, 2], labels[:, 3]
+ w, h = labels[:, 4], labels[:, 5]
+ gt_bbox = np.stack((cx, cy, w, h)).T.astype('float32')
+ gt_class = labels[:, 0:1].astype('int32')
+ gt_score = np.ones((len(labels), 1)).astype('float32')
+ gt_ide = labels[:, 1:2].astype('int32')
+ for i, _ in enumerate(gt_ide):
+ if gt_ide[i] > -1:
+ gt_ide[i] += self.tid_start_index[data_name]
+
+ mot_rec = {
+ 'im_file': img_file,
+ 'im_id': img_index,
+ } if 'image' in self.data_fields else {}
+
+ gt_rec = {
+ 'gt_class': gt_class,
+ 'gt_score': gt_score,
+ 'gt_bbox': gt_bbox,
+ 'gt_ide': gt_ide,
+ }
+
+ for k, v in gt_rec.items():
+ if k in self.data_fields:
+ mot_rec[k] = v
+
+ records.append(mot_rec)
+ if self.sample_num > 0 and img_index >= self.sample_num:
+ break
+        assert len(records) > 0, 'no mot record found in %s' % (
+ self.image_lists)
+ self.roidbs, self.cname2cid = records, cname2cid
+
+
+@register
+@serializable
+class MCMOTDataSet(DetDataset):
+ """
+    Load dataset in MOT format; multi-class MOT is supported.
+
+ Args:
+ dataset_dir (str): root directory for dataset.
+        image_lists (list(str)): mcmot data image lists for a multi-source mcmot dataset.
+ data_fields (list): key name of data dictionary, at least have 'image'.
+ label_list (str): if use_default_label is False, will load
+ mapping between category and class index.
+ sample_num (int): number of samples to load, -1 means all.
+
+ Notes:
+        The MCMOT dataset root directory follows this layout:
+ dataset/mot
+ |——————image_lists
+ | |——————visdrone_mcmot.train
+ | |——————visdrone_mcmot.val
+ visdrone_mcmot
+ |——————images
+ | └——————train
+ | └——————val
+ └——————labels_with_ids
+ └——————train
+ """
+
+ def __init__(self,
+ dataset_dir=None,
+ image_lists=[],
+ data_fields=['image'],
+ label_list=None,
+ sample_num=-1):
+ super(MCMOTDataSet, self).__init__(
+ dataset_dir=dataset_dir,
+ data_fields=data_fields,
+ sample_num=sample_num)
+ self.dataset_dir = dataset_dir
+ self.image_lists = image_lists
+ if isinstance(self.image_lists, str):
+ self.image_lists = [self.image_lists]
+ self.label_list = label_list
+ self.roidbs = None
+ self.cname2cid = None
+
+ def get_anno(self):
+ if self.image_lists == []:
+ return
+ # only used to get categories and metric
+        # only check the first sub-dataset; the label_list of every sub-dataset should be the same.
+ first_mot_data = self.image_lists[0].split('.')[0]
+ anno_file = os.path.join(self.dataset_dir, first_mot_data, 'label_list.txt')
+ return anno_file
+
+ def parse_dataset(self):
+ self.img_files = OrderedDict()
+ self.img_start_index = OrderedDict()
+ self.label_files = OrderedDict()
+ self.tid_num = OrderedDict()
+ self.tid_start_idx_of_cls_ids = defaultdict(dict) # for MCMOT
+
+ img_index = 0
+ for data_name in self.image_lists:
+ # check every data image list
+ image_lists_dir = os.path.join(self.dataset_dir, 'image_lists')
+ assert os.path.isdir(image_lists_dir), \
+ "The {} is not a directory.".format(image_lists_dir)
+
+ list_path = os.path.join(image_lists_dir, data_name)
+ assert os.path.exists(list_path), \
+ "The list path {} does not exist.".format(list_path)
+
+ # record img_files, filter out empty ones
+ with open(list_path, 'r') as file:
+ self.img_files[data_name] = file.readlines()
+ self.img_files[data_name] = [
+ os.path.join(self.dataset_dir, x.strip())
+ for x in self.img_files[data_name]
+ ]
+ self.img_files[data_name] = list(
+ filter(lambda x: len(x) > 0, self.img_files[data_name]))
+
+ self.img_start_index[data_name] = img_index
+ img_index += len(self.img_files[data_name])
+
+ # record label_files
+ self.label_files[data_name] = [
+ x.replace('images', 'labels_with_ids').replace(
+ '.png', '.txt').replace('.jpg', '.txt')
+ for x in self.img_files[data_name]
+ ]
+
+ for data_name, label_paths in self.label_files.items():
+ # using max_ids_dict rather than max_index
+ max_ids_dict = defaultdict(int)
+ for lp in label_paths:
+ lb = np.loadtxt(lp)
+ if len(lb) < 1:
+ continue
+ lb = lb.reshape(-1, 6)
+ for item in lb:
+ if item[1] > max_ids_dict[int(item[0])]:
+ # item[0]: cls_id
+ # item[1]: track id
+ max_ids_dict[int(item[0])] = int(item[1])
+ # track id number
+ self.tid_num[data_name] = max_ids_dict
+
+ last_idx_dict = defaultdict(int)
+ for i, (k, v) in enumerate(self.tid_num.items()): # each sub dataset
+ for cls_id, id_num in v.items(): # v is a max_ids_dict
+ self.tid_start_idx_of_cls_ids[k][cls_id] = last_idx_dict[cls_id]
+ last_idx_dict[cls_id] += id_num
+
+ self.num_identities_dict = defaultdict(int)
+ for k, v in last_idx_dict.items():
+ self.num_identities_dict[k] = int(v) # total ids of each category
+
+ self.num_imgs_each_data = [len(x) for x in self.img_files.values()]
+ self.total_imgs = sum(self.num_imgs_each_data)
+
+ # cname2cid and cid2cname
+ cname2cid = {}
+ if self.label_list is not None:
+            # if a label_list is used for a multi-source mixed dataset,
+            # make sure it exists in at least the first sub-dataset.
+ sub_dataset = self.image_lists[0].split('.')[0]
+ label_path = os.path.join(self.dataset_dir, sub_dataset,
+ self.label_list)
+ if not os.path.exists(label_path):
+ logger.info(
+ "Note: label_list {} does not exists, use VisDrone 10 classes labels as default.".
+ format(label_path))
+ cname2cid = visdrone_mcmot_label()
+ else:
+ with open(label_path, 'r') as fr:
+ label_id = 0
+ for line in fr.readlines():
+ cname2cid[line.strip()] = label_id
+ label_id += 1
+ else:
+ cname2cid = visdrone_mcmot_label()
+
+ cid2cname = dict([(v, k) for (k, v) in cname2cid.items()])
+
+ logger.info('MCMOT dataset summary: ')
+ logger.info(self.tid_num)
+ logger.info('Total images: {}'.format(self.total_imgs))
+ logger.info('Image start index: {}'.format(self.img_start_index))
+
+ logger.info('Total identities of each category: ')
+ num_identities_dict = sorted(
+ self.num_identities_dict.items(), key=lambda x: x[0])
+ total_IDs_all_cats = 0
+ for (k, v) in num_identities_dict:
+ logger.info('Category {} [{}] has {} IDs.'.format(k, cid2cname[k],
+ v))
+ total_IDs_all_cats += v
+ logger.info('Total identities of all categories: {}'.format(
+ total_IDs_all_cats))
+
+ logger.info('Identity start index of each category: ')
+ for k, v in self.tid_start_idx_of_cls_ids.items():
+ sorted_v = sorted(v.items(), key=lambda x: x[0])
+ for (cls_id, start_idx) in sorted_v:
+ logger.info('Start index of dataset {} category {:d} is {:d}'
+ .format(k, cls_id, start_idx))
+
+ records = []
+ for img_index in range(self.total_imgs):
+ for i, (k, v) in enumerate(self.img_start_index.items()):
+ if img_index >= v:
+ data_name = list(self.label_files.keys())[i]
+ start_index = v
+ img_file = self.img_files[data_name][img_index - start_index]
+ lbl_file = self.label_files[data_name][img_index - start_index]
+
+ if not os.path.exists(img_file):
+ logger.warning('Illegal image file: {}, and it will be ignored'.
+ format(img_file))
+ continue
+ if not os.path.isfile(lbl_file):
+ logger.warning('Illegal label file: {}, and it will be ignored'.
+ format(lbl_file))
+ continue
+
+ labels = np.loadtxt(lbl_file, dtype=np.float32).reshape(-1, 6)
+ # each row in labels (N, 6) is [gt_class, gt_identity, cx, cy, w, h]
+
+ cx, cy = labels[:, 2], labels[:, 3]
+ w, h = labels[:, 4], labels[:, 5]
+ gt_bbox = np.stack((cx, cy, w, h)).T.astype('float32')
+ gt_class = labels[:, 0:1].astype('int32')
+ gt_score = np.ones((len(labels), 1)).astype('float32')
+ gt_ide = labels[:, 1:2].astype('int32')
+ for i, _ in enumerate(gt_ide):
+ if gt_ide[i] > -1:
+ cls_id = int(gt_class[i])
+ start_idx = self.tid_start_idx_of_cls_ids[data_name][cls_id]
+ gt_ide[i] += start_idx
+
+ mot_rec = {
+ 'im_file': img_file,
+ 'im_id': img_index,
+ } if 'image' in self.data_fields else {}
+
+ gt_rec = {
+ 'gt_class': gt_class,
+ 'gt_score': gt_score,
+ 'gt_bbox': gt_bbox,
+ 'gt_ide': gt_ide,
+ }
+
+ for k, v in gt_rec.items():
+ if k in self.data_fields:
+ mot_rec[k] = v
+
+ records.append(mot_rec)
+ if self.sample_num > 0 and img_index >= self.sample_num:
+ break
+        assert len(records) > 0, 'no mot record found in %s' % (
+ self.image_lists)
+ self.roidbs, self.cname2cid = records, cname2cid
+
+
+@register
+@serializable
+class MOTImageFolder(DetDataset):
+ """
+    Load a MOT-format dataset from an image folder or a video.
+ Args:
+ video_file (str): path of the video file, default ''.
+        frame_rate (int): frame rate of the video; read via cv2.VideoCapture if not set.
+ dataset_dir (str): root directory for dataset.
+ keep_ori_im (bool): whether to keep original image, default False.
+ Set True when used during MOT model inference while saving
+ images or video, or used in DeepSORT.
+ """
+
+ def __init__(self,
+ video_file=None,
+ frame_rate=-1,
+ dataset_dir=None,
+ data_root=None,
+ image_dir=None,
+ sample_num=-1,
+ keep_ori_im=False,
+ **kwargs):
+ super(MOTImageFolder, self).__init__(
+ dataset_dir, image_dir, sample_num=sample_num)
+ self.video_file = video_file
+ self.data_root = data_root
+ self.keep_ori_im = keep_ori_im
+ self._imid2path = {}
+ self.roidbs = None
+ self.frame_rate = frame_rate
+
+ def check_or_download_dataset(self):
+ return
+
+ def parse_dataset(self, ):
+ if not self.roidbs:
+ if self.video_file is None:
+ self.frame_rate = 30 # set as default if infer image folder
+ self.roidbs = self._load_images()
+ else:
+ self.roidbs = self._load_video_images()
+
+ def _load_video_images(self):
+ if self.frame_rate == -1:
+ # if frame_rate is not set for video, use cv2.VideoCapture
+ cap = cv2.VideoCapture(self.video_file)
+ self.frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
+
+ extension = self.video_file.split('.')[-1]
+ output_path = self.video_file.replace('.{}'.format(extension), '')
+ frames_path = video2frames(self.video_file, output_path,
+ self.frame_rate)
+ self.video_frames = sorted(
+ glob.glob(os.path.join(frames_path, '*.png')))
+
+ self.video_length = len(self.video_frames)
+ logger.info('Length of the video: {:d} frames.'.format(
+ self.video_length))
+ ct = 0
+ records = []
+ for image in self.video_frames:
+ assert image != '' and os.path.isfile(image), \
+ "Image {} not found".format(image)
+ if self.sample_num > 0 and ct >= self.sample_num:
+ break
+ rec = {'im_id': np.array([ct]), 'im_file': image}
+ if self.keep_ori_im:
+ rec.update({'keep_ori_im': 1})
+ self._imid2path[ct] = image
+ ct += 1
+ records.append(rec)
+ assert len(records) > 0, "No image file found"
+ return records
+
+ def _find_images(self):
+ image_dir = self.image_dir
+ if not isinstance(image_dir, Sequence):
+ image_dir = [image_dir]
+ images = []
+ for im_dir in image_dir:
+ if os.path.isdir(im_dir):
+ im_dir = os.path.join(self.dataset_dir, im_dir)
+ images.extend(_make_dataset(im_dir))
+ elif os.path.isfile(im_dir) and _is_valid_file(im_dir):
+ images.append(im_dir)
+ return images
+
+ def _load_images(self):
+ images = self._find_images()
+ ct = 0
+ records = []
+ for image in images:
+ assert image != '' and os.path.isfile(image), \
+ "Image {} not found".format(image)
+ if self.sample_num > 0 and ct >= self.sample_num:
+ break
+ rec = {'im_id': np.array([ct]), 'im_file': image}
+ if self.keep_ori_im:
+ rec.update({'keep_ori_im': 1})
+ self._imid2path[ct] = image
+ ct += 1
+ records.append(rec)
+ assert len(records) > 0, "No image file found"
+ return records
+
+ def get_imid2path(self):
+ return self._imid2path
+
+ def set_images(self, images):
+ self.image_dir = images
+ self.roidbs = self._load_images()
+
+ def set_video(self, video_file, frame_rate):
+ # update video_file and frame_rate by command line of tools/infer_mot.py
+ self.video_file = video_file
+ self.frame_rate = frame_rate
+ assert os.path.isfile(self.video_file) and _is_valid_video(self.video_file), \
+ "wrong or unsupported file format: {}".format(self.video_file)
+ self.roidbs = self._load_video_images()
+
+
+def _is_valid_video(f, extensions=('.mp4', '.avi', '.mov', '.rmvb', '.flv')):
+ return f.lower().endswith(extensions)
+
+
+def video2frames(video_path, outpath, frame_rate, **kargs):
+ def _dict2str(kargs):
+ cmd_str = ''
+ for k, v in kargs.items():
+ cmd_str += (' ' + str(k) + ' ' + str(v))
+ return cmd_str
+
+ ffmpeg = ['ffmpeg ', ' -y -loglevel ', ' error ']
+ vid_name = os.path.basename(video_path).split('.')[0]
+ out_full_path = os.path.join(outpath, vid_name)
+
+ if not os.path.exists(out_full_path):
+ os.makedirs(out_full_path)
+
+ # video file name
+ outformat = os.path.join(out_full_path, '%08d.png')
+
+ cmd = ffmpeg + [
+ ' -i ', video_path, ' -r ', str(frame_rate), ' -f image2 ', outformat
+ ]
+ cmd = ''.join(cmd) + _dict2str(kargs)
+
+ if os.system(cmd) != 0:
+ raise RuntimeError('ffmpeg process video: {} error'.format(video_path))
+
+ sys.stdout.flush()
+ return out_full_path
+
+
+def mot_label():
+ labels_map = {'person': 0}
+ return labels_map
+
+
+def visdrone_mcmot_label():
+ labels_map = {
+ 'pedestrian': 0,
+ 'people': 1,
+ 'bicycle': 2,
+ 'car': 3,
+ 'van': 4,
+ 'truck': 5,
+ 'tricycle': 6,
+ 'awning-tricycle': 7,
+ 'bus': 8,
+ 'motor': 9,
+ }
+ return labels_map
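+
+
+# Minimal usage sketch (illustrative; the video path is hypothetical and
+# ffmpeg must be available on PATH for frame extraction):
+#
+#     loader = MOTImageFolder(keep_ori_im=True)
+#     loader.set_video('demo/entrance.mp4', frame_rate=30)
+#     print(len(loader.roidbs))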
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/sniper_coco.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/sniper_coco.py
new file mode 100644
index 000000000..1b07e7a31
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/sniper_coco.py
@@ -0,0 +1,194 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import cv2
+import json
+import copy
+import numpy as np
+
+try:
+ from collections.abc import Sequence
+except Exception:
+ from collections import Sequence
+
+from ppdet.core.workspace import register, serializable
+from ppdet.data.crop_utils.annotation_cropper import AnnoCropper
+from .coco import COCODataSet
+from .dataset import _make_dataset, _is_valid_file
+from ppdet.utils.logger import setup_logger
+
+logger = setup_logger('sniper_coco_dataset')
+
+
+@register
+@serializable
+class SniperCOCODataSet(COCODataSet):
+ """SniperCOCODataSet"""
+
+ def __init__(self,
+ dataset_dir=None,
+ image_dir=None,
+ anno_path=None,
+ proposals_file=None,
+ data_fields=['image'],
+ sample_num=-1,
+ load_crowd=False,
+ allow_empty=True,
+ empty_ratio=1.,
+ is_trainset=True,
+ image_target_sizes=[2000, 1000],
+                 valid_box_ratio_ranges=[[-1, 0.1], [0.08, -1]],
+ chip_target_size=500,
+ chip_target_stride=200,
+ use_neg_chip=False,
+ max_neg_num_per_im=8,
+ max_per_img=-1,
+ nms_thresh=0.5):
+ super(SniperCOCODataSet, self).__init__(
+ dataset_dir=dataset_dir,
+ image_dir=image_dir,
+ anno_path=anno_path,
+ data_fields=data_fields,
+ sample_num=sample_num,
+ load_crowd=load_crowd,
+ allow_empty=allow_empty,
+ empty_ratio=empty_ratio
+ )
+ self.proposals_file = proposals_file
+ self.proposals = None
+ self.anno_cropper = None
+ self.is_trainset = is_trainset
+ self.image_target_sizes = image_target_sizes
+ self.valid_box_ratio_ranges = valid_box_ratio_ranges
+ self.chip_target_size = chip_target_size
+ self.chip_target_stride = chip_target_stride
+ self.use_neg_chip = use_neg_chip
+ self.max_neg_num_per_im = max_neg_num_per_im
+ self.max_per_img = max_per_img
+ self.nms_thresh = nms_thresh
+
+
+ def parse_dataset(self):
+ if not hasattr(self, "roidbs"):
+ super(SniperCOCODataSet, self).parse_dataset()
+ if self.is_trainset:
+ self._parse_proposals()
+ self._merge_anno_proposals()
+ self.ori_roidbs = copy.deepcopy(self.roidbs)
+ self.init_anno_cropper()
+ self.roidbs = self.generate_chips_roidbs(self.roidbs, self.is_trainset)
+
+ def set_proposals_file(self, file_path):
+ self.proposals_file = file_path
+
+ def init_anno_cropper(self):
+ logger.info("Init AnnoCropper...")
+ self.anno_cropper = AnnoCropper(
+ image_target_sizes=self.image_target_sizes,
+ valid_box_ratio_ranges=self.valid_box_ratio_ranges,
+ chip_target_size=self.chip_target_size,
+ chip_target_stride=self.chip_target_stride,
+ use_neg_chip=self.use_neg_chip,
+ max_neg_num_per_im=self.max_neg_num_per_im,
+ max_per_img=self.max_per_img,
+ nms_thresh=self.nms_thresh
+ )
+
+ def generate_chips_roidbs(self, roidbs, is_trainset):
+ if is_trainset:
+ roidbs = self.anno_cropper.crop_anno_records(roidbs)
+ else:
+ roidbs = self.anno_cropper.crop_infer_anno_records(roidbs)
+ return roidbs
+
+ def _parse_proposals(self):
+ if self.proposals_file:
+ self.proposals = {}
+ logger.info("Parse proposals file:{}".format(self.proposals_file))
+ with open(self.proposals_file, 'r') as f:
+ proposals = json.load(f)
+ for prop in proposals:
+ image_id = prop["image_id"]
+ if image_id not in self.proposals:
+ self.proposals[image_id] = []
+ x, y, w, h = prop["bbox"]
+ self.proposals[image_id].append([x, y, x + w, y + h])
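+            # Expected input (illustrative): a COCO-style detection
+            # result list, e.g. [{"image_id": 1, "bbox": [x, y, w, h],
+            # "score": 0.9}, ...]; each xywh box is converted to xyxy above.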
+
+ def _merge_anno_proposals(self):
+ assert self.roidbs
+ if self.proposals and len(self.proposals.keys()) > 0:
+ logger.info("merge proposals to annos")
+ for id, record in enumerate(self.roidbs):
+ image_id = int(record["im_id"])
+ if image_id not in self.proposals.keys():
+ logger.info("image id :{} no proposals".format(image_id))
+ record["proposals"] = np.array(self.proposals.get(image_id, []), dtype=np.float32)
+ self.roidbs[id] = record
+
+ def get_ori_roidbs(self):
+ if not hasattr(self, "ori_roidbs"):
+ return None
+ return self.ori_roidbs
+
+ def get_roidbs(self):
+ if not hasattr(self, "roidbs"):
+ self.parse_dataset()
+ return self.roidbs
+
+ def set_roidbs(self, roidbs):
+ self.roidbs = roidbs
+
+ def check_or_download_dataset(self):
+ return
+
+ def _parse(self):
+ image_dir = self.image_dir
+ if not isinstance(image_dir, Sequence):
+ image_dir = [image_dir]
+ images = []
+ for im_dir in image_dir:
+ if os.path.isdir(im_dir):
+ im_dir = os.path.join(self.dataset_dir, im_dir)
+ images.extend(_make_dataset(im_dir))
+ elif os.path.isfile(im_dir) and _is_valid_file(im_dir):
+ images.append(im_dir)
+ return images
+
+ def _load_images(self):
+ images = self._parse()
+ ct = 0
+ records = []
+ for image in images:
+ assert image != '' and os.path.isfile(image), \
+ "Image {} not found".format(image)
+ if self.sample_num > 0 and ct >= self.sample_num:
+ break
+ im = cv2.imread(image)
+ h, w, c = im.shape
+ rec = {'im_id': np.array([ct]), 'im_file': image, "h": h, "w": w}
+ self._imid2path[ct] = image
+ ct += 1
+ records.append(rec)
+ assert len(records) > 0, "No image file found"
+ return records
+
+ def get_imid2path(self):
+ return self._imid2path
+
+ def set_images(self, images):
+ self._imid2path = {}
+ self.image_dir = images
+ self.roidbs = self._load_images()
+
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/voc.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/voc.py
new file mode 100644
index 000000000..1c2a7ef98
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/voc.py
@@ -0,0 +1,231 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import numpy as np
+
+import xml.etree.ElementTree as ET
+
+from ppdet.core.workspace import register, serializable
+
+from .dataset import DetDataset
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+
+@register
+@serializable
+class VOCDataSet(DetDataset):
+ """
+ Load dataset with PascalVOC format.
+
+ Notes:
+        `anno_path` must contain the xml file and image file paths for annotations.
+
+ Args:
+ dataset_dir (str): root directory for dataset.
+ image_dir (str): directory for images.
+ anno_path (str): voc annotation file path.
+ data_fields (list): key name of data dictionary, at least have 'image'.
+ sample_num (int): number of samples to load, -1 means all.
+ label_list (str): if use_default_label is False, will load
+ mapping between category and class index.
+        allow_empty (bool): whether to load empty entries. False as default.
+        empty_ratio (float): the ratio of empty records to the total
+            records after sampling. If empty_ratio is out of [0., 1.),
+            the records are not sampled and all the empty entries are kept.
+            1. as default.
+ """
+
+ def __init__(self,
+ dataset_dir=None,
+ image_dir=None,
+ anno_path=None,
+ data_fields=['image'],
+ sample_num=-1,
+ label_list=None,
+ allow_empty=False,
+ empty_ratio=1.):
+ super(VOCDataSet, self).__init__(
+ dataset_dir=dataset_dir,
+ image_dir=image_dir,
+ anno_path=anno_path,
+ data_fields=data_fields,
+ sample_num=sample_num)
+ self.label_list = label_list
+ self.allow_empty = allow_empty
+ self.empty_ratio = empty_ratio
+
+ def _sample_empty(self, records, num):
+        # if empty_ratio is out of [0., 1.), do not sample the records
+ if self.empty_ratio < 0. or self.empty_ratio >= 1.:
+ return records
+ import random
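+        # Keep num_empty / (num_empty + num_non_empty) == empty_ratio:
+        # solving for num_empty gives num * empty_ratio / (1 - empty_ratio),
+        # capped at the number of available empty records.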
+ sample_num = min(
+ int(num * self.empty_ratio / (1 - self.empty_ratio)), len(records))
+ records = random.sample(records, sample_num)
+ return records
+
+    def parse_dataset(self):
+ anno_path = os.path.join(self.dataset_dir, self.anno_path)
+ image_dir = os.path.join(self.dataset_dir, self.image_dir)
+
+ # mapping category name to class id
+ # first_class:0, second_class:1, ...
+ records = []
+ empty_records = []
+ ct = 0
+ cname2cid = {}
+ if self.label_list:
+ label_path = os.path.join(self.dataset_dir, self.label_list)
+ if not os.path.exists(label_path):
+ raise ValueError("label_list {} does not exists".format(
+ label_path))
+ with open(label_path, 'r') as fr:
+ label_id = 0
+ for line in fr.readlines():
+ cname2cid[line.strip()] = label_id
+ label_id += 1
+ else:
+ cname2cid = pascalvoc_label()
+
+ with open(anno_path, 'r') as fr:
+ while True:
+ line = fr.readline()
+ if not line:
+ break
+ img_file, xml_file = [os.path.join(image_dir, x) \
+ for x in line.strip().split()[:2]]
+ if not os.path.exists(img_file):
+ logger.warning(
+ 'Illegal image file: {}, and it will be ignored'.format(
+ img_file))
+ continue
+ if not os.path.isfile(xml_file):
+ logger.warning(
+ 'Illegal xml file: {}, and it will be ignored'.format(
+ xml_file))
+ continue
+ tree = ET.parse(xml_file)
+ if tree.find('id') is None:
+ im_id = np.array([ct])
+ else:
+ im_id = np.array([int(tree.find('id').text)])
+
+ objs = tree.findall('object')
+ im_w = float(tree.find('size').find('width').text)
+ im_h = float(tree.find('size').find('height').text)
+ if im_w < 0 or im_h < 0:
+ logger.warning(
+ 'Illegal width: {} or height: {} in annotation, '
+ 'and {} will be ignored'.format(im_w, im_h, xml_file))
+ continue
+
+ num_bbox, i = len(objs), 0
+ gt_bbox = np.zeros((num_bbox, 4), dtype=np.float32)
+ gt_class = np.zeros((num_bbox, 1), dtype=np.int32)
+ gt_score = np.zeros((num_bbox, 1), dtype=np.float32)
+ difficult = np.zeros((num_bbox, 1), dtype=np.int32)
+ for obj in objs:
+ cname = obj.find('name').text
+
+ # user dataset may not contain difficult field
+ _difficult = obj.find('difficult')
+ _difficult = int(
+ _difficult.text) if _difficult is not None else 0
+
+ x1 = float(obj.find('bndbox').find('xmin').text)
+ y1 = float(obj.find('bndbox').find('ymin').text)
+ x2 = float(obj.find('bndbox').find('xmax').text)
+ y2 = float(obj.find('bndbox').find('ymax').text)
+ x1 = max(0, x1)
+ y1 = max(0, y1)
+ x2 = min(im_w - 1, x2)
+ y2 = min(im_h - 1, y2)
+ if x2 > x1 and y2 > y1:
+ gt_bbox[i, :] = [x1, y1, x2, y2]
+ gt_class[i, 0] = cname2cid[cname]
+ gt_score[i, 0] = 1.
+ difficult[i, 0] = _difficult
+ i += 1
+ else:
+ logger.warning(
+ 'Found an invalid bbox in annotations: xml_file: {}'
+ ', x1: {}, y1: {}, x2: {}, y2: {}.'.format(
+ xml_file, x1, y1, x2, y2))
+ gt_bbox = gt_bbox[:i, :]
+ gt_class = gt_class[:i, :]
+ gt_score = gt_score[:i, :]
+ difficult = difficult[:i, :]
+
+ voc_rec = {
+ 'im_file': img_file,
+ 'im_id': im_id,
+ 'h': im_h,
+ 'w': im_w
+ } if 'image' in self.data_fields else {}
+
+ gt_rec = {
+ 'gt_class': gt_class,
+ 'gt_score': gt_score,
+ 'gt_bbox': gt_bbox,
+ 'difficult': difficult
+ }
+ for k, v in gt_rec.items():
+ if k in self.data_fields:
+ voc_rec[k] = v
+
+ if len(objs) == 0:
+ empty_records.append(voc_rec)
+ else:
+ records.append(voc_rec)
+
+ ct += 1
+ if self.sample_num > 0 and ct >= self.sample_num:
+ break
+        assert ct > 0, 'no voc record found in %s' % (self.anno_path)
+ logger.debug('{} samples in file {}'.format(ct, anno_path))
+ if self.allow_empty and len(empty_records) > 0:
+ empty_records = self._sample_empty(empty_records, len(records))
+ records += empty_records
+ self.roidbs, self.cname2cid = records, cname2cid
+
+ def get_label_list(self):
+ return os.path.join(self.dataset_dir, self.label_list)
+
+
+def pascalvoc_label():
+ labels_map = {
+ 'aeroplane': 0,
+ 'bicycle': 1,
+ 'bird': 2,
+ 'boat': 3,
+ 'bottle': 4,
+ 'bus': 5,
+ 'car': 6,
+ 'cat': 7,
+ 'chair': 8,
+ 'cow': 9,
+ 'diningtable': 10,
+ 'dog': 11,
+ 'horse': 12,
+ 'motorbike': 13,
+ 'person': 14,
+ 'pottedplant': 15,
+ 'sheep': 16,
+ 'sofa': 17,
+ 'train': 18,
+ 'tvmonitor': 19
+ }
+ return labels_map
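+
+
+# Minimal usage sketch (illustrative; the paths are hypothetical and the
+# annotation file is assumed to hold "image_path xml_path" pairs per line,
+# both relative to image_dir, which is what parse_dataset expects):
+#
+#   dataset = VOCDataSet(
+#       dataset_dir='dataset/voc',
+#       image_dir='VOCdevkit/VOC2007',
+#       anno_path='trainval.txt',
+#       label_list='label_list.txt')
+#   dataset.parse_dataset()
+#   print(len(dataset.roidbs), dataset.cname2cid)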
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/widerface.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/widerface.py
new file mode 100644
index 000000000..a17c2aaf8
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/source/widerface.py
@@ -0,0 +1,180 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import numpy as np
+
+from ppdet.core.workspace import register, serializable
+from .dataset import DetDataset
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+
+@register
+@serializable
+class WIDERFaceDataSet(DetDataset):
+ """
+ Load WiderFace records with 'anno_path'
+
+ Args:
+ dataset_dir (str): root directory for dataset.
+ image_dir (str): directory for images.
+ anno_path (str): WiderFace annotation data.
+ data_fields (list): key name of data dictionary, at least have 'image'.
+ sample_num (int): number of samples to load, -1 means all.
+ with_lmk (bool): whether to load face landmark keypoint labels.
+ """
+
+ def __init__(self,
+ dataset_dir=None,
+ image_dir=None,
+ anno_path=None,
+ data_fields=['image'],
+ sample_num=-1,
+ with_lmk=False):
+ super(WIDERFaceDataSet, self).__init__(
+ dataset_dir=dataset_dir,
+ image_dir=image_dir,
+ anno_path=anno_path,
+ data_fields=data_fields,
+ sample_num=sample_num,
+ with_lmk=with_lmk)
+ self.anno_path = anno_path
+ self.sample_num = sample_num
+ self.roidbs = None
+ self.cname2cid = None
+ self.with_lmk = with_lmk
+
+ def parse_dataset(self):
+ anno_path = os.path.join(self.dataset_dir, self.anno_path)
+ image_dir = os.path.join(self.dataset_dir, self.image_dir)
+
+ txt_file = anno_path
+
+ records = []
+ ct = 0
+ file_lists = self._load_file_list(txt_file)
+ cname2cid = widerface_label()
+
+ for item in file_lists:
+ im_fname = item[0]
+ im_id = np.array([ct])
+ gt_bbox = np.zeros((len(item) - 1, 4), dtype=np.float32)
+ gt_class = np.zeros((len(item) - 1, 1), dtype=np.int32)
+ gt_lmk_labels = np.zeros((len(item) - 1, 10), dtype=np.float32)
+ lmk_ignore_flag = np.zeros((len(item) - 1, 1), dtype=np.int32)
+ for index_box in range(len(item)):
+ if index_box < 1:
+ continue
+ gt_bbox[index_box - 1] = item[index_box][0]
+ if self.with_lmk:
+ gt_lmk_labels[index_box - 1] = item[index_box][1]
+ lmk_ignore_flag[index_box - 1] = item[index_box][2]
+ im_fname = os.path.join(image_dir,
+ im_fname) if image_dir else im_fname
+ widerface_rec = {
+ 'im_file': im_fname,
+ 'im_id': im_id,
+ } if 'image' in self.data_fields else {}
+ gt_rec = {
+ 'gt_bbox': gt_bbox,
+ 'gt_class': gt_class,
+ }
+ for k, v in gt_rec.items():
+ if k in self.data_fields:
+ widerface_rec[k] = v
+ if self.with_lmk:
+ widerface_rec['gt_keypoint'] = gt_lmk_labels
+ widerface_rec['keypoint_ignore'] = lmk_ignore_flag
+
+ if len(item) != 0:
+ records.append(widerface_rec)
+
+ ct += 1
+ if self.sample_num > 0 and ct >= self.sample_num:
+ break
+        assert len(records) > 0, 'no widerface record found in %s' % (anno_path)
+ logger.debug('{} samples in file {}'.format(ct, anno_path))
+ self.roidbs, self.cname2cid = records, cname2cid
+
+ def _load_file_list(self, input_txt):
+ with open(input_txt, 'r') as f_dir:
+ lines_input_txt = f_dir.readlines()
+
+ file_dict = {}
+ num_class = 0
+ exts = ['jpg', 'jpeg', 'png', 'bmp']
+ exts += [ext.upper() for ext in exts]
+ for i in range(len(lines_input_txt)):
+ line_txt = lines_input_txt[i].strip('\n\t\r')
+ split_str = line_txt.split(' ')
+ if len(split_str) == 1:
+ img_file_name = os.path.split(split_str[0])[1]
+ split_txt = img_file_name.split('.')
+ if len(split_txt) < 2:
+ continue
+ elif split_txt[-1] in exts:
+ if i != 0:
+ num_class += 1
+ file_dict[num_class] = [line_txt]
+ else:
+ if len(line_txt) <= 6:
+ continue
+ result_boxs = []
+ xmin = float(split_str[0])
+ ymin = float(split_str[1])
+ w = float(split_str[2])
+ h = float(split_str[3])
+ # Filter out wrong labels
+ if w < 0 or h < 0:
+ logger.warning('Illegal box with w: {}, h: {} in '
+ 'img: {}, and it will be ignored'.format(
+ w, h, file_dict[num_class][0]))
+ continue
+ xmin = max(0, xmin)
+ ymin = max(0, ymin)
+ xmax = xmin + w
+ ymax = ymin + h
+ gt_bbox = [xmin, ymin, xmax, ymax]
+ result_boxs.append(gt_bbox)
+ if self.with_lmk:
+                    assert len(split_str) > 18, 'When `with_lmk=True`, the number ' \
+                        'of fields per line in the annotation file should ' \
+                        'exceed 18.'
+ lmk0_x = float(split_str[5])
+ lmk0_y = float(split_str[6])
+ lmk1_x = float(split_str[8])
+ lmk1_y = float(split_str[9])
+ lmk2_x = float(split_str[11])
+ lmk2_y = float(split_str[12])
+ lmk3_x = float(split_str[14])
+ lmk3_y = float(split_str[15])
+ lmk4_x = float(split_str[17])
+ lmk4_y = float(split_str[18])
+ lmk_ignore_flag = 0 if lmk0_x == -1 else 1
+ gt_lmk_label = [
+ lmk0_x, lmk0_y, lmk1_x, lmk1_y, lmk2_x, lmk2_y, lmk3_x,
+ lmk3_y, lmk4_x, lmk4_y
+ ]
+ result_boxs.append(gt_lmk_label)
+ result_boxs.append(lmk_ignore_flag)
+ file_dict[num_class].append(result_boxs)
+
+ return list(file_dict.values())
+
+
+def widerface_label():
+ labels_map = {'face': 0}
+ return labels_map
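+
+
+# Illustrative annotation layout parsed by _load_file_list (hypothetical
+# example; single-token lines name an image, each following line carries
+# "xmin ymin w h ..." for one face, with extra landmark fields expected
+# when `with_lmk=True`):
+#
+#   0--Parade/0_Parade_marchingband_1_5.jpg
+#   443 201 102 136
+#   0--Parade/0_Parade_marchingband_1_6.jpg
+#   104 191 64 90
+#   340 183 72 95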
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__init__.py
new file mode 100644
index 000000000..fb8a1a449
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__init__.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import operators
+from . import batch_operators
+from . import keypoint_operators
+from . import mot_operators
+
+from .operators import *
+from .batch_operators import *
+from .keypoint_operators import *
+from .mot_operators import *
+
+__all__ = []
+__all__ += registered_ops
+__all__ += keypoint_operators.__all__
+__all__ += mot_operators.__all__
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..ada30eb03
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/atss_assigner.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/atss_assigner.cpython-37.pyc
new file mode 100644
index 000000000..937f1cda9
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/atss_assigner.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/batch_operators.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/batch_operators.cpython-37.pyc
new file mode 100644
index 000000000..06f9ac59e
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/batch_operators.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/keypoint_operators.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/keypoint_operators.cpython-37.pyc
new file mode 100644
index 000000000..54cc72243
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/keypoint_operators.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/mot_operators.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/mot_operators.cpython-37.pyc
new file mode 100644
index 000000000..0de9ed2c8
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/mot_operators.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/op_helper.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/op_helper.cpython-37.pyc
new file mode 100644
index 000000000..a2570b09e
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/op_helper.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/operators.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/operators.cpython-37.pyc
new file mode 100644
index 000000000..8f1e42647
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/__pycache__/operators.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/atss_assigner.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/atss_assigner.py
new file mode 100644
index 000000000..178d94fb6
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/atss_assigner.py
@@ -0,0 +1,269 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The code is based on:
+# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/atss_assigner.py
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+
+def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
+ """Calculate overlap between two set of bboxes.
+ If ``is_aligned `` is ``False``, then calculate the overlaps between each
+ bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
+ pair of bboxes1 and bboxes2.
+ Args:
+ bboxes1 (Tensor): shape (B, m, 4) in format or empty.
+ bboxes2 (Tensor): shape (B, n, 4) in format or empty.
+ B indicates the batch dim, in shape (B1, B2, ..., Bn).
+ If ``is_aligned `` is ``True``, then m and n must be equal.
+ mode (str): "iou" (intersection over union) or "iof" (intersection over
+ foreground).
+ is_aligned (bool, optional): If True, then m and n must be equal.
+ Default False.
+ eps (float, optional): A value added to the denominator for numerical
+ stability. Default 1e-6.
+ Returns:
+ Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
+ """
+ assert mode in ['iou', 'iof', 'giou'], 'Unsupported mode {}'.format(mode)
+    # Either the boxes are empty or the length of the boxes' last dimension is 4
+ assert (bboxes1.shape[-1] == 4 or bboxes1.shape[0] == 0)
+ assert (bboxes2.shape[-1] == 4 or bboxes2.shape[0] == 0)
+
+ # Batch dim must be the same
+ # Batch dim: (B1, B2, ... Bn)
+ assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
+ batch_shape = bboxes1.shape[:-2]
+
+ rows = bboxes1.shape[-2] if bboxes1.shape[0] > 0 else 0
+ cols = bboxes2.shape[-2] if bboxes2.shape[0] > 0 else 0
+ if is_aligned:
+ assert rows == cols
+
+ if rows * cols == 0:
+ if is_aligned:
+ return np.random.random(batch_shape + (rows, ))
+ else:
+ return np.random.random(batch_shape + (rows, cols))
+
+ area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
+ bboxes1[..., 3] - bboxes1[..., 1])
+ area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
+ bboxes2[..., 3] - bboxes2[..., 1])
+
+ if is_aligned:
+ lt = np.maximum(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]
+ rb = np.minimum(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]
+
+ wh = (rb - lt).clip(min=0) # [B, rows, 2]
+ overlap = wh[..., 0] * wh[..., 1]
+
+ if mode in ['iou', 'giou']:
+ union = area1 + area2 - overlap
+ else:
+ union = area1
+ if mode == 'giou':
+ enclosed_lt = np.minimum(bboxes1[..., :2], bboxes2[..., :2])
+ enclosed_rb = np.maximum(bboxes1[..., 2:], bboxes2[..., 2:])
+ else:
+ lt = np.maximum(bboxes1[..., :, None, :2],
+ bboxes2[..., None, :, :2]) # [B, rows, cols, 2]
+ rb = np.minimum(bboxes1[..., :, None, 2:],
+ bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]
+
+ wh = (rb - lt).clip(min=0) # [B, rows, cols, 2]
+ overlap = wh[..., 0] * wh[..., 1]
+
+ if mode in ['iou', 'giou']:
+ union = area1[..., None] + area2[..., None, :] - overlap
+ else:
+ union = area1[..., None]
+ if mode == 'giou':
+ enclosed_lt = np.minimum(bboxes1[..., :, None, :2],
+ bboxes2[..., None, :, :2])
+ enclosed_rb = np.maximum(bboxes1[..., :, None, 2:],
+ bboxes2[..., None, :, 2:])
+
+ eps = np.array([eps])
+ union = np.maximum(union, eps)
+ ious = overlap / union
+ if mode in ['iou', 'iof']:
+ return ious
+ # calculate gious
+ enclose_wh = (enclosed_rb - enclosed_lt).clip(min=0)
+ enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
+ enclose_area = np.maximum(enclose_area, eps)
+ gious = ious - (enclose_area - union) / enclose_area
+ return gious
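+
+
+# Worked example (illustrative): two 10x10 boxes overlapping on a 5x5 patch
+# intersect in 25 px while their union is 100 + 100 - 25 = 175 px:
+#   >>> bbox_overlaps(np.array([[0., 0., 10., 10.]]),
+#   ...               np.array([[5., 5., 15., 15.]]))
+#   array([[0.14285714]])  # 25 / 175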
+
+
+def topk_(input, k, axis=1, largest=True):
+ x = -input if largest else input
+ if axis == 0:
+ row_index = np.arange(input.shape[1 - axis])
+ topk_index = np.argpartition(x, k, axis=axis)[0:k, :]
+ topk_data = x[topk_index, row_index]
+
+ topk_index_sort = np.argsort(topk_data, axis=axis)
+ topk_data_sort = topk_data[topk_index_sort, row_index]
+ topk_index_sort = topk_index[0:k, :][topk_index_sort, row_index]
+ else:
+ column_index = np.arange(x.shape[1 - axis])[:, None]
+ topk_index = np.argpartition(x, k, axis=axis)[:, 0:k]
+ topk_data = x[column_index, topk_index]
+ topk_data = -topk_data if largest else topk_data
+ topk_index_sort = np.argsort(topk_data, axis=axis)
+ topk_data_sort = topk_data[column_index, topk_index_sort]
+ topk_index_sort = topk_index[:, 0:k][column_index, topk_index_sort]
+
+ return topk_data_sort, topk_index_sort
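+
+
+# Usage sketch (illustrative): with largest=False, as ATSS uses it below, the
+# k smallest entries per row come back in ascending order with their indices:
+#   >>> topk_(np.array([[0.3, 0.1, 0.2]]), k=2, axis=1, largest=False)
+#   (array([[0.1, 0.2]]), array([[1, 2]]))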
+
+
+class ATSSAssigner(object):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+ Each proposals will be assigned with `0` or a positive integer
+ indicating the ground truth index.
+
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ topk (float): number of bbox selected in each level
+ """
+
+ def __init__(self, topk=9):
+ self.topk = topk
+
+ def __call__(self,
+ bboxes,
+ num_level_bboxes,
+ gt_bboxes,
+ gt_bboxes_ignore=None,
+ gt_labels=None):
+ """Assign gt to bboxes.
+ The assignment is done in following steps
+ 1. compute iou between all bbox (bbox of all pyramid levels) and gt
+ 2. compute center distance between all bbox and gt
+ 3. on each pyramid level, for each gt, select k bbox whose center
+ are closest to the gt center, so we total select k*l bbox as
+ candidates for each gt
+ 4. get corresponding iou for the these candidates, and compute the
+ mean and std, set mean + std as the iou threshold
+ 5. select these candidates whose iou are greater than or equal to
+ the threshold as postive
+ 6. limit the positive sample's center in gt
+ Args:
+ bboxes (np.array): Bounding boxes to be assigned, shape(n, 4).
+ num_level_bboxes (List): num of bboxes in each level
+ gt_bboxes (np.array): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (np.array, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (np.array, optional): Label of gt_bboxes, shape (k, ).
+ """
+ bboxes = bboxes[:, :4]
+ num_gt, num_bboxes = gt_bboxes.shape[0], bboxes.shape[0]
+
+ # assign 0 by default
+ assigned_gt_inds = np.zeros((num_bboxes, ), dtype=np.int64)
+
+ if num_gt == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ max_overlaps = np.zeros((num_bboxes, ))
+ if num_gt == 0:
+ # No truth, assign everything to background
+ assigned_gt_inds[:] = 0
+ if not np.any(gt_labels):
+ assigned_labels = None
+ else:
+ assigned_labels = -np.ones((num_bboxes, ), dtype=np.int64)
+ return assigned_gt_inds, max_overlaps
+
+ # compute iou between all bbox and gt
+ overlaps = bbox_overlaps(bboxes, gt_bboxes)
+ # compute center distance between all bbox and gt
+ gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
+ gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
+ gt_points = np.stack((gt_cx, gt_cy), axis=1)
+
+ bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
+ bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
+ bboxes_points = np.stack((bboxes_cx, bboxes_cy), axis=1)
+
+ distances = np.sqrt(
+ np.power((bboxes_points[:, None, :] - gt_points[None, :, :]), 2)
+ .sum(-1))
+
+ # Selecting candidates based on the center distance
+ candidate_idxs = []
+ start_idx = 0
+ for bboxes_per_level in num_level_bboxes:
+ # on each pyramid level, for each gt,
+ # select k bbox whose center are closest to the gt center
+ end_idx = start_idx + bboxes_per_level
+ distances_per_level = distances[start_idx:end_idx, :]
+ selectable_k = min(self.topk, bboxes_per_level)
+ _, topk_idxs_per_level = topk_(
+ distances_per_level, selectable_k, axis=0, largest=False)
+ candidate_idxs.append(topk_idxs_per_level + start_idx)
+ start_idx = end_idx
+ candidate_idxs = np.concatenate(candidate_idxs, axis=0)
+
+        # get the corresponding iou for these candidates, and compute the
+ # mean and std, set mean + std as the iou threshold
+ candidate_overlaps = overlaps[candidate_idxs, np.arange(num_gt)]
+ overlaps_mean_per_gt = candidate_overlaps.mean(0)
+ overlaps_std_per_gt = candidate_overlaps.std(0)
+ overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
+
+ is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
+
+        # limit the positive samples' centers to lie inside the gt
+ for gt_idx in range(num_gt):
+ candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
+ ep_bboxes_cx = np.broadcast_to(
+ bboxes_cx.reshape(1, -1), [num_gt, num_bboxes]).reshape(-1)
+ ep_bboxes_cy = np.broadcast_to(
+ bboxes_cy.reshape(1, -1), [num_gt, num_bboxes]).reshape(-1)
+ candidate_idxs = candidate_idxs.reshape(-1)
+
+ # calculate the left, top, right, bottom distance between positive
+ # bbox center and gt side
+ l_ = ep_bboxes_cx[candidate_idxs].reshape(-1, num_gt) - gt_bboxes[:, 0]
+ t_ = ep_bboxes_cy[candidate_idxs].reshape(-1, num_gt) - gt_bboxes[:, 1]
+ r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].reshape(-1, num_gt)
+ b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].reshape(-1, num_gt)
+ is_in_gts = np.stack([l_, t_, r_, b_], axis=1).min(axis=1) > 0.01
+ is_pos = is_pos & is_in_gts
+
+ # if an anchor box is assigned to multiple gts,
+ # the one with the highest IoU will be selected.
+ overlaps_inf = -np.inf * np.ones_like(overlaps).T.reshape(-1)
+ index = candidate_idxs.reshape(-1)[is_pos.reshape(-1)]
+ overlaps_inf[index] = overlaps.T.reshape(-1)[index]
+ overlaps_inf = overlaps_inf.reshape(num_gt, -1).T
+
+ max_overlaps = overlaps_inf.max(axis=1)
+ argmax_overlaps = overlaps_inf.argmax(axis=1)
+ assigned_gt_inds[max_overlaps !=
+ -np.inf] = argmax_overlaps[max_overlaps != -np.inf] + 1
+
+ return assigned_gt_inds, max_overlaps
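+
+
+if __name__ == '__main__':
+    # Minimal self-check (illustrative values, not part of the original
+    # module): four anchors on a single pyramid level and one gt box that
+    # coincides with the first anchor, so only anchor 0 should be positive.
+    anchors = np.array([[0., 0., 10., 10.],
+                        [10., 0., 20., 10.],
+                        [0., 10., 10., 20.],
+                        [10., 10., 20., 20.]])
+    gts = np.array([[0., 0., 10., 10.]])
+    assigner = ATSSAssigner(topk=2)
+    inds, ious = assigner(anchors, [4], gts)
+    print(inds)  # expected: [1 0 0 0] (1-based gt index, 0 = background)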
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/autoaugment_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/autoaugment_utils.py
new file mode 100644
index 000000000..cfa89d374
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/autoaugment_utils.py
@@ -0,0 +1,1586 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Reference:
+# https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/autoaugment_utils.py
+"""AutoAugment util file."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import inspect
+import math
+from PIL import Image, ImageEnhance
+import numpy as np
+import cv2
+from copy import deepcopy
+
+# This signifies the max integer that the controller RNN could predict for the
+# augmentation scheme.
+_MAX_LEVEL = 10.
+
+# Represents an invalid bounding box that is used for checking for padding
+# lists of bounding box coordinates for a few augmentation operations
+_INVALID_BOX = [[-1.0, -1.0, -1.0, -1.0]]
+
+
+def policy_v0():
+ """Autoaugment policy that was used in AutoAugment Detection Paper."""
+ # Each tuple is an augmentation operation of the form
+ # (operation, probability, magnitude). Each element in policy is a
+ # sub-policy that will be applied sequentially on the image.
+ policy = [
+ [('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
+ [('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
+ [('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
+ [('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
+ [('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
+ ]
+ return policy
+
+
+def policy_v1():
+ """Autoaugment policy that was used in AutoAugment Detection Paper."""
+ # Each tuple is an augmentation operation of the form
+ # (operation, probability, magnitude). Each element in policy is a
+ # sub-policy that will be applied sequentially on the image.
+ policy = [
+ [('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
+ [('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
+ [('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
+ [('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
+ [('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
+ [('Color', 0.0, 0), ('ShearX_Only_BBoxes', 0.8, 4)],
+ [('ShearY_Only_BBoxes', 0.8, 2), ('Flip_Only_BBoxes', 0.0, 10)],
+ [('Equalize', 0.6, 10), ('TranslateX_BBox', 0.2, 2)],
+ [('Color', 1.0, 10), ('TranslateY_Only_BBoxes', 0.4, 6)],
+        [('Rotate_BBox', 0.8, 10), ('Contrast', 0.0, 10)],
+ [('Cutout', 0.2, 2), ('Brightness', 0.8, 10)],
+ [('Color', 1.0, 6), ('Equalize', 1.0, 2)],
+ [('Cutout_Only_BBoxes', 0.4, 6), ('TranslateY_Only_BBoxes', 0.8, 2)],
+ [('Color', 0.2, 8), ('Rotate_BBox', 0.8, 10)],
+ [('Sharpness', 0.4, 4), ('TranslateY_Only_BBoxes', 0.0, 4)],
+ [('Sharpness', 1.0, 4), ('SolarizeAdd', 0.4, 4)],
+ [('Rotate_BBox', 1.0, 8), ('Sharpness', 0.2, 8)],
+ [('ShearY_BBox', 0.6, 10), ('Equalize_Only_BBoxes', 0.6, 8)],
+ [('ShearX_BBox', 0.2, 6), ('TranslateY_Only_BBoxes', 0.2, 10)],
+ [('SolarizeAdd', 0.6, 8), ('Brightness', 0.8, 10)],
+ ]
+ return policy
+
+
+def policy_vtest():
+ """Autoaugment test policy for debugging."""
+ # Each tuple is an augmentation operation of the form
+ # (operation, probability, magnitude). Each element in policy is a
+ # sub-policy that will be applied sequentially on the image.
+ policy = [[('TranslateX_BBox', 1.0, 4), ('Equalize', 1.0, 10)], ]
+ return policy
+
+
+def policy_v2():
+ """Additional policy that performs well on object detection."""
+ # Each tuple is an augmentation operation of the form
+ # (operation, probability, magnitude). Each element in policy is a
+ # sub-policy that will be applied sequentially on the image.
+ policy = [
+ [('Color', 0.0, 6), ('Cutout', 0.6, 8), ('Sharpness', 0.4, 8)],
+ [('Rotate_BBox', 0.4, 8), ('Sharpness', 0.4, 2),
+ ('Rotate_BBox', 0.8, 10)],
+ [('TranslateY_BBox', 1.0, 8), ('AutoContrast', 0.8, 2)],
+ [('AutoContrast', 0.4, 6), ('ShearX_BBox', 0.8, 8),
+ ('Brightness', 0.0, 10)],
+ [('SolarizeAdd', 0.2, 6), ('Contrast', 0.0, 10),
+ ('AutoContrast', 0.6, 0)],
+ [('Cutout', 0.2, 0), ('Solarize', 0.8, 8), ('Color', 1.0, 4)],
+ [('TranslateY_BBox', 0.0, 4), ('Equalize', 0.6, 8),
+ ('Solarize', 0.0, 10)],
+ [('TranslateY_BBox', 0.2, 2), ('ShearY_BBox', 0.8, 8),
+ ('Rotate_BBox', 0.8, 8)],
+ [('Cutout', 0.8, 8), ('Brightness', 0.8, 8), ('Cutout', 0.2, 2)],
+ [('Color', 0.8, 4), ('TranslateY_BBox', 1.0, 6),
+ ('Rotate_BBox', 0.6, 6)],
+ [('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4), ('Cutout', 0.2, 8)],
+ [('Rotate_BBox', 0.0, 0), ('Equalize', 0.6, 6),
+ ('ShearY_BBox', 0.6, 8)],
+ [('Brightness', 0.8, 8), ('AutoContrast', 0.4, 2),
+ ('Brightness', 0.2, 2)],
+ [('TranslateY_BBox', 0.4, 8), ('Solarize', 0.4, 6),
+ ('SolarizeAdd', 0.2, 10)],
+ [('Contrast', 1.0, 10), ('SolarizeAdd', 0.2, 8), ('Equalize', 0.2, 4)],
+ ]
+ return policy
+
+
+def policy_v3():
+ """"Additional policy that performs well on object detection."""
+ # Each tuple is an augmentation operation of the form
+ # (operation, probability, magnitude). Each element in policy is a
+ # sub-policy that will be applied sequentially on the image.
+ policy = [
+ [('Posterize', 0.8, 2), ('TranslateX_BBox', 1.0, 8)],
+ [('BBox_Cutout', 0.2, 10), ('Sharpness', 1.0, 8)],
+ [('Rotate_BBox', 0.6, 8), ('Rotate_BBox', 0.8, 10)],
+ [('Equalize', 0.8, 10), ('AutoContrast', 0.2, 10)],
+ [('SolarizeAdd', 0.2, 2), ('TranslateY_BBox', 0.2, 8)],
+ [('Sharpness', 0.0, 2), ('Color', 0.4, 8)],
+ [('Equalize', 1.0, 8), ('TranslateY_BBox', 1.0, 8)],
+ [('Posterize', 0.6, 2), ('Rotate_BBox', 0.0, 10)],
+ [('AutoContrast', 0.6, 0), ('Rotate_BBox', 1.0, 6)],
+ [('Equalize', 0.0, 4), ('Cutout', 0.8, 10)],
+ [('Brightness', 1.0, 2), ('TranslateY_BBox', 1.0, 6)],
+ [('Contrast', 0.0, 2), ('ShearY_BBox', 0.8, 0)],
+ [('AutoContrast', 0.8, 10), ('Contrast', 0.2, 10)],
+ [('Rotate_BBox', 1.0, 10), ('Cutout', 1.0, 10)],
+ [('SolarizeAdd', 0.8, 6), ('Equalize', 0.8, 8)],
+ ]
+ return policy
+
+
+def _equal(val1, val2, eps=1e-8):
+ return abs(val1 - val2) <= eps
+
+
+def blend(image1, image2, factor):
+ """Blend image1 and image2 using 'factor'.
+
+ Factor can be above 0.0. A value of 0.0 means only image1 is used.
+ A value of 1.0 means only image2 is used. A value between 0.0 and
+ 1.0 means we linearly interpolate the pixel values between the two
+ images. A value greater than 1.0 "extrapolates" the difference
+ between the two pixel values, and we clip the results to values
+ between 0 and 255.
+
+ Args:
+ image1: An image Tensor of type uint8.
+ image2: An image Tensor of type uint8.
+ factor: A floating point value above 0.0.
+
+ Returns:
+ A blended image Tensor of type uint8.
+ """
+ if factor == 0.0:
+ return image1
+ if factor == 1.0:
+ return image2
+
+ image1 = image1.astype(np.float32)
+ image2 = image2.astype(np.float32)
+
+ difference = image2 - image1
+ scaled = factor * difference
+
+ # Do addition in float.
+ temp = image1 + scaled
+
+ # Interpolate
+ if factor > 0.0 and factor < 1.0:
+ # Interpolation means we always stay within 0 and 255.
+ return temp.astype(np.uint8)
+
+ # Extrapolate:
+ #
+ # We need to clip and then cast.
+ return np.clip(temp, a_min=0, a_max=255).astype(np.uint8)
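+
+
+# Worked example (illustrative): factor=0.5 lands halfway between the inputs,
+# so blending black (0) with white (255) yields mid-gray:
+#   >>> blend(np.zeros((1, 1, 3), np.uint8), np.full((1, 1, 3), 255, np.uint8), 0.5)
+#   array([[[127, 127, 127]]], dtype=uint8)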
+
+
+def cutout(image, pad_size, replace=0):
+ """Apply cutout (https://arxiv.org/abs/1708.04552) to image.
+
+    This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
+    a random location within `image`. The pixels filled in will have the
+    value `replace`. The location where the mask is applied is chosen
+    uniformly at random over the whole image.
+
+    Args:
+        image: An image Tensor of type uint8.
+        pad_size: Specifies how big the zero mask applied to the image will
+            be. The mask will be of size (2*pad_size x 2*pad_size).
+        replace: What pixel value to fill in the image in the area that has
+            the cutout mask applied to it.
+
+    Returns:
+        An image Tensor that is of type uint8.
+    Example:
+        img = cv2.imread("/home/vis/gry/train/img_data/test.jpg", cv2.IMREAD_COLOR)
+        new_img = cutout(img, pad_size=50, replace=0)
+ """
+ image_height, image_width = image.shape[0], image.shape[1]
+
+ cutout_center_height = np.random.randint(low=0, high=image_height)
+ cutout_center_width = np.random.randint(low=0, high=image_width)
+
+ lower_pad = np.maximum(0, cutout_center_height - pad_size)
+ upper_pad = np.maximum(0, image_height - cutout_center_height - pad_size)
+ left_pad = np.maximum(0, cutout_center_width - pad_size)
+ right_pad = np.maximum(0, image_width - cutout_center_width - pad_size)
+
+ cutout_shape = [
+ image_height - (lower_pad + upper_pad),
+ image_width - (left_pad + right_pad)
+ ]
+ padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
+ mask = np.pad(np.zeros(
+ cutout_shape, dtype=image.dtype),
+ padding_dims,
+ 'constant',
+ constant_values=1)
+ mask = np.expand_dims(mask, -1)
+ mask = np.tile(mask, [1, 1, 3])
+ image = np.where(
+ np.equal(mask, 0),
+ np.ones_like(
+ image, dtype=image.dtype) * replace,
+ image)
+ return image.astype(np.uint8)
+
+
+def solarize(image, threshold=128):
+ # For each pixel in the image, select the pixel
+ # if the value is less than the threshold.
+ # Otherwise, subtract 255 from the pixel.
+ return np.where(image < threshold, image, 255 - image)
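+
+
+# Worked example (illustrative): pixels at or above the threshold are
+# inverted, pixels below it are kept:
+#   >>> solarize(np.array([100, 200], dtype=np.uint8))
+#   array([100,  55], dtype=uint8)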
+
+
+def solarize_add(image, addition=0, threshold=128):
+ # For each pixel in the image less than threshold
+ # we add 'addition' amount to it and then clip the
+ # pixel value to be between 0 and 255. The value
+ # of 'addition' is between -128 and 128.
+ added_image = image.astype(np.int64) + addition
+ added_image = np.clip(added_image, a_min=0, a_max=255).astype(np.uint8)
+ return np.where(image < threshold, added_image, image)
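+
+
+# Worked example (illustrative): only pixels below the threshold receive the
+# addition; sums are clipped to [0, 255]:
+#   >>> solarize_add(np.array([100, 200], dtype=np.uint8), addition=50)
+#   array([150, 200], dtype=uint8)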
+
+
+def color(image, factor):
+ """use cv2 to deal"""
+ gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+ degenerate = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
+ return blend(degenerate, image, factor)
+
+
+# refer to https://github.com/4uiiurz1/pytorch-auto-augment/blob/024b2eac4140c38df8342f09998e307234cafc80/auto_augment.py#L197
+def contrast(img, factor):
+ img = ImageEnhance.Contrast(Image.fromarray(img)).enhance(factor)
+ return np.array(img)
+
+
+def brightness(image, factor):
+ """Equivalent of PIL Brightness."""
+ degenerate = np.zeros_like(image)
+ return blend(degenerate, image, factor)
+
+
+def posterize(image, bits):
+ """Equivalent of PIL Posterize."""
+ shift = 8 - bits
+ return np.left_shift(np.right_shift(image, shift), shift)
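+
+
+# Worked example (illustrative): bits=2 keeps only the top two bits of each
+# value (123 = 0b01111011 -> 0b01000000 = 64):
+#   >>> posterize(np.array([123], dtype=np.uint8), bits=2)
+#   array([64], dtype=uint8)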
+
+
+def rotate(image, degrees, replace):
+ """Rotates the image by degrees either clockwise or counterclockwise.
+
+ Args:
+ image: An image Tensor of type uint8.
+        degrees: Float, a scalar angle in degrees to rotate all images by. If
+            degrees is positive, the image will be rotated clockwise; otherwise
+            it will be rotated counterclockwise.
+ replace: A one or three value 1D tensor to fill empty pixels caused by
+ the rotate operation.
+
+ Returns:
+ The rotated version of image.
+ """
+ image = wrap(image)
+ image = Image.fromarray(image)
+ image = image.rotate(degrees)
+ image = np.array(image, dtype=np.uint8)
+ return unwrap(image, replace)
+
+
+def random_shift_bbox(image,
+ bbox,
+ pixel_scaling,
+ replace,
+ new_min_bbox_coords=None):
+ """Move the bbox and the image content to a slightly new random location.
+
+ Args:
+ image: 3D uint8 Tensor.
+ bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
+ of type float that represents the normalized coordinates between 0 and 1.
+ The potential values for the new min corner of the bbox will be between
+ [old_min - pixel_scaling * bbox_height/2,
+       old_min + pixel_scaling * bbox_height/2].
+ pixel_scaling: A float between 0 and 1 that specifies the pixel range
+ that the new bbox location will be sampled from.
+ replace: A one or three value 1D tensor to fill empty pixels.
+ new_min_bbox_coords: If not None, then this is a tuple that specifies the
+ (min_y, min_x) coordinates of the new bbox. Normally this is randomly
+ specified, but this allows it to be manually set. The coordinates are
+ the absolute coordinates between 0 and image height/width and are int32.
+
+ Returns:
+ The new image that will have the shifted bbox location in it along with
+ the new bbox that contains the new coordinates.
+ """
+ # Obtains image height and width and create helper clip functions.
+ image_height, image_width = image.shape[0], image.shape[1]
+ image_height = float(image_height)
+ image_width = float(image_width)
+
+ def clip_y(val):
+ return np.clip(val, a_min=0, a_max=image_height - 1).astype(np.int32)
+
+ def clip_x(val):
+ return np.clip(val, a_min=0, a_max=image_width - 1).astype(np.int32)
+
+ # Convert bbox to pixel coordinates.
+ min_y = int(image_height * bbox[0])
+ min_x = int(image_width * bbox[1])
+ max_y = clip_y(image_height * bbox[2])
+ max_x = clip_x(image_width * bbox[3])
+
+ bbox_height, bbox_width = (max_y - min_y + 1, max_x - min_x + 1)
+ image_height = int(image_height)
+ image_width = int(image_width)
+
+ # Select the new min/max bbox ranges that are used for sampling the
+ # new min x/y coordinates of the shifted bbox.
+ minval_y = clip_y(min_y - np.int32(pixel_scaling * float(bbox_height) /
+ 2.0))
+ maxval_y = clip_y(min_y + np.int32(pixel_scaling * float(bbox_height) /
+ 2.0))
+ minval_x = clip_x(min_x - np.int32(pixel_scaling * float(bbox_width) / 2.0))
+ maxval_x = clip_x(min_x + np.int32(pixel_scaling * float(bbox_width) / 2.0))
+
+ # Sample and calculate the new unclipped min/max coordinates of the new bbox.
+ if new_min_bbox_coords is None:
+ unclipped_new_min_y = np.random.randint(
+ low=minval_y, high=maxval_y, dtype=np.int32)
+ unclipped_new_min_x = np.random.randint(
+ low=minval_x, high=maxval_x, dtype=np.int32)
+ else:
+ unclipped_new_min_y, unclipped_new_min_x = (
+ clip_y(new_min_bbox_coords[0]), clip_x(new_min_bbox_coords[1]))
+ unclipped_new_max_y = unclipped_new_min_y + bbox_height - 1
+ unclipped_new_max_x = unclipped_new_min_x + bbox_width - 1
+
+ # Determine if any of the new bbox was shifted outside the current image.
+ # This is used for determining if any of the original bbox content should be
+ # discarded.
+ new_min_y, new_min_x, new_max_y, new_max_x = (
+ clip_y(unclipped_new_min_y), clip_x(unclipped_new_min_x),
+ clip_y(unclipped_new_max_y), clip_x(unclipped_new_max_x))
+ shifted_min_y = (new_min_y - unclipped_new_min_y) + min_y
+ shifted_max_y = max_y - (unclipped_new_max_y - new_max_y)
+ shifted_min_x = (new_min_x - unclipped_new_min_x) + min_x
+ shifted_max_x = max_x - (unclipped_new_max_x - new_max_x)
+
+ # Create the new bbox tensor by converting pixel integer values to floats.
+ new_bbox = np.stack([
+ float(new_min_y) / float(image_height), float(new_min_x) /
+ float(image_width), float(new_max_y) / float(image_height),
+ float(new_max_x) / float(image_width)
+ ])
+
+ # Copy the contents in the bbox and fill the old bbox location
+ # with gray (128).
+ bbox_content = image[shifted_min_y:shifted_max_y + 1, shifted_min_x:
+ shifted_max_x + 1, :]
+
+ def mask_and_add_image(min_y_, min_x_, max_y_, max_x_, mask, content_tensor,
+ image_):
+ """Applies mask to bbox region in image then adds content_tensor to it."""
+ mask = np.pad(mask, [[min_y_, (image_height - 1) - max_y_],
+ [min_x_, (image_width - 1) - max_x_], [0, 0]],
+ 'constant',
+ constant_values=1)
+
+ content_tensor = np.pad(content_tensor,
+ [[min_y_, (image_height - 1) - max_y_],
+ [min_x_, (image_width - 1) - max_x_], [0, 0]],
+ 'constant',
+ constant_values=0)
+ return image_ * mask + content_tensor
+
+ # Zero out original bbox location.
+ mask = np.zeros_like(image)[min_y:max_y + 1, min_x:max_x + 1, :]
+ grey_tensor = np.zeros_like(mask) + replace[0]
+ image = mask_and_add_image(min_y, min_x, max_y, max_x, mask, grey_tensor,
+ image)
+
+ # Fill in bbox content to new bbox location.
+ mask = np.zeros_like(bbox_content)
+ image = mask_and_add_image(new_min_y, new_min_x, new_max_y, new_max_x, mask,
+ bbox_content, image)
+
+ return image.astype(np.uint8), new_bbox
+
+
+def _clip_bbox(min_y, min_x, max_y, max_x):
+ """Clip bounding box coordinates between 0 and 1.
+
+ Args:
+ min_y: Normalized bbox coordinate of type float between 0 and 1.
+ min_x: Normalized bbox coordinate of type float between 0 and 1.
+ max_y: Normalized bbox coordinate of type float between 0 and 1.
+ max_x: Normalized bbox coordinate of type float between 0 and 1.
+
+ Returns:
+ Clipped coordinate values between 0 and 1.
+ """
+ min_y = np.clip(min_y, a_min=0, a_max=1.0)
+ min_x = np.clip(min_x, a_min=0, a_max=1.0)
+ max_y = np.clip(max_y, a_min=0, a_max=1.0)
+ max_x = np.clip(max_x, a_min=0, a_max=1.0)
+ return min_y, min_x, max_y, max_x
+
+
+def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):
+ """Adjusts bbox coordinates to make sure the area is > 0.
+
+ Args:
+ min_y: Normalized bbox coordinate of type float between 0 and 1.
+ min_x: Normalized bbox coordinate of type float between 0 and 1.
+ max_y: Normalized bbox coordinate of type float between 0 and 1.
+ max_x: Normalized bbox coordinate of type float between 0 and 1.
+ delta: Float, this is used to create a gap of size 2 * delta between
+ bbox min/max coordinates that are the same on the boundary.
+ This prevents the bbox from having an area of zero.
+
+ Returns:
+ Tuple of new bbox coordinates between 0 and 1 that will now have a
+ guaranteed area > 0.
+ """
+ height = max_y - min_y
+ width = max_x - min_x
+
+ def _adjust_bbox_boundaries(min_coord, max_coord):
+ # Make sure max is never 0 and min is never 1.
+ max_coord = np.maximum(max_coord, 0.0 + delta)
+ min_coord = np.minimum(min_coord, 1.0 - delta)
+ return min_coord, max_coord
+
+ if _equal(height, 0):
+ min_y, max_y = _adjust_bbox_boundaries(min_y, max_y)
+
+ if _equal(width, 0):
+ min_x, max_x = _adjust_bbox_boundaries(min_x, max_x)
+
+ return min_y, min_x, max_y, max_x
+
+
+def _scale_bbox_only_op_probability(prob):
+ """Reduce the probability of the bbox-only operation.
+
+ Probability is reduced so that we do not distort the content of too many
+    bounding boxes that are close to each other. The value of 3.0 was a
+    hyperparameter chosen when designing the autoaugment algorithm that we
+    found empirically to work well.
+
+ Args:
+ prob: Float that is the probability of applying the bbox-only operation.
+
+ Returns:
+ Reduced probability.
+ """
+ return prob / 3.0
+
+
+def _apply_bbox_augmentation(image, bbox, augmentation_func, *args):
+ """Applies augmentation_func to the subsection of image indicated by bbox.
+
+ Args:
+ image: 3D uint8 Tensor.
+ bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
+ of type float that represents the normalized coordinates between 0 and 1.
+ augmentation_func: Augmentation function that will be applied to the
+ subsection of image.
+ *args: Additional parameters that will be passed into augmentation_func
+ when it is called.
+
+ Returns:
+ A modified version of image, where the bbox location in the image will
+    have `augmentation_func` applied to it.
+ """
+ image_height = image.shape[0]
+ image_width = image.shape[1]
+
+ min_y = int(image_height * bbox[0])
+ min_x = int(image_width * bbox[1])
+ max_y = int(image_height * bbox[2])
+ max_x = int(image_width * bbox[3])
+
+ # Clip to be sure the max values do not fall out of range.
+ max_y = np.minimum(max_y, image_height - 1)
+ max_x = np.minimum(max_x, image_width - 1)
+
+ # Get the sub-tensor that is the image within the bounding box region.
+ bbox_content = image[min_y:max_y + 1, min_x:max_x + 1, :]
+
+ # Apply the augmentation function to the bbox portion of the image.
+ augmented_bbox_content = augmentation_func(bbox_content, *args)
+
+ # Pad the augmented_bbox_content and the mask to match the shape of original
+ # image.
+ augmented_bbox_content = np.pad(
+ augmented_bbox_content, [[min_y, (image_height - 1) - max_y],
+ [min_x, (image_width - 1) - max_x], [0, 0]],
+ 'constant',
+ constant_values=1)
+
+ # Create a mask that will be used to zero out a part of the original image.
+ mask_tensor = np.zeros_like(bbox_content)
+
+ mask_tensor = np.pad(mask_tensor,
+ [[min_y, (image_height - 1) - max_y],
+ [min_x, (image_width - 1) - max_x], [0, 0]],
+ 'constant',
+ constant_values=1)
+ # Replace the old bbox content with the new augmented content.
+ image = image * mask_tensor + augmented_bbox_content
+ return image.astype(np.uint8)
+
+
+def _concat_bbox(bbox, bboxes):
+ """Helper function that concates bbox to bboxes along the first dimension."""
+
+ # Note if all elements in bboxes are -1 (_INVALID_BOX), then this means
+ # we discard bboxes and start the bboxes Tensor with the current bbox.
+ bboxes_sum_check = np.sum(bboxes)
+ bbox = np.expand_dims(bbox, 0)
+ # This check will be true when it is an _INVALID_BOX
+ if _equal(bboxes_sum_check, -4):
+ bboxes = bbox
+ else:
+ bboxes = np.concatenate([bboxes, bbox], 0)
+ return bboxes
+
+
+def _apply_bbox_augmentation_wrapper(image, bbox, new_bboxes, prob,
+ augmentation_func, func_changes_bbox,
+ *args):
+ """Applies _apply_bbox_augmentation with probability prob.
+
+ Args:
+ image: 3D uint8 Tensor.
+ bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
+ of type float that represents the normalized coordinates between 0 and 1.
+ new_bboxes: 2D Tensor that is a list of the bboxes in the image after they
+ have been altered by aug_func. These will only be changed when
+ func_changes_bbox is set to true. Each bbox has 4 elements
+ (min_y, min_x, max_y, max_x) of type float that are the normalized
+ bbox coordinates between 0 and 1.
+ prob: Float that is the probability of applying _apply_bbox_augmentation.
+ augmentation_func: Augmentation function that will be applied to the
+ subsection of image.
+ func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
+ to image.
+ *args: Additional parameters that will be passed into augmentation_func
+ when it is called.
+
+ Returns:
+      A tuple. First element is a modified version of image, where the bbox
+ location in the image will have augmentation_func applied to it if it is
+ chosen to be called with probability `prob`. The second element is a
+ Tensor of Tensors of length 4 that will contain the altered bbox after
+ applying augmentation_func.
+ """
+ should_apply_op = (np.random.rand() + prob >= 1)
+ if func_changes_bbox:
+ if should_apply_op:
+ augmented_image, bbox = augmentation_func(image, bbox, *args)
+ else:
+ augmented_image, bbox = (image, bbox)
+ else:
+ if should_apply_op:
+ augmented_image = _apply_bbox_augmentation(image, bbox,
+ augmentation_func, *args)
+ else:
+ augmented_image = image
+ new_bboxes = _concat_bbox(bbox, new_bboxes)
+ return augmented_image.astype(np.uint8), new_bboxes
+
+
+def _apply_multi_bbox_augmentation(image, bboxes, prob, aug_func,
+ func_changes_bbox, *args):
+ """Applies aug_func to the image for each bbox in bboxes.
+
+ Args:
+ image: 3D uint8 Tensor.
+ bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
+ has 4 elements (min_y, min_x, max_y, max_x) of type float.
+ prob: Float that is the probability of applying aug_func to a specific
+ bounding box within the image.
+ aug_func: Augmentation function that will be applied to the
+ subsections of image indicated by the bbox values in bboxes.
+ func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
+ to image.
+ *args: Additional parameters that will be passed into augmentation_func
+ when it is called.
+
+ Returns:
+ A modified version of image, where each bbox location in the image will
+ have augmentation_func applied to it if it is chosen to be called with
+ probability prob independently across all bboxes. Also the final
+ bboxes are returned that will be unchanged if func_changes_bbox is set to
+ false and if true, the new altered ones will be returned.
+ """
+ # Will keep track of the new altered bboxes after aug_func is repeatedly
+ # applied. The -1 values are a dummy value and this first Tensor will be
+ # removed upon appending the first real bbox.
+ new_bboxes = np.array(_INVALID_BOX)
+
+ # If the bboxes are empty, then just give it _INVALID_BOX. The result
+ # will be thrown away.
+ bboxes = np.array((_INVALID_BOX)) if bboxes.size == 0 else bboxes
+
+ assert bboxes.shape[1] == 4, "bboxes.shape[1] must be 4!!!!"
+
+ # pylint:disable=g-long-lambda
+ # pylint:disable=line-too-long
+ wrapped_aug_func = lambda _image, bbox, _new_bboxes: _apply_bbox_augmentation_wrapper(_image, bbox, _new_bboxes, prob, aug_func, func_changes_bbox, *args)
+ # pylint:enable=g-long-lambda
+ # pylint:enable=line-too-long
+
+ # Setup the while_loop.
+ num_bboxes = bboxes.shape[0] # We loop until we go over all bboxes.
+ idx = 0 # Counter for the while loop.
+
+ # Conditional function when to end the loop once we go over all bboxes
+ # images_and_bboxes contain (_image, _new_bboxes)
+ def cond(_idx, _images_and_bboxes):
+ return _idx < num_bboxes
+
+ # Shuffle the bboxes so that the augmentation order is not deterministic if
+ # we are not changing the bboxes with aug_func.
+ # if not func_changes_bbox:
+ # print(bboxes)
+ # loop_bboxes = np.take(bboxes,np.random.permutation(bboxes.shape[0]),axis=0)
+ # print(loop_bboxes)
+ # else:
+ # loop_bboxes = bboxes
+    # we cannot shuffle the bboxes because they do not carry class information here
+ loop_bboxes = deepcopy(bboxes)
+
+ # Main function of while_loop where we repeatedly apply augmentation on the
+ # bboxes in the image.
+ # pylint:disable=g-long-lambda
+ body = lambda _idx, _images_and_bboxes: [
+ _idx + 1, wrapped_aug_func(_images_and_bboxes[0],
+ loop_bboxes[_idx],
+ _images_and_bboxes[1])]
+ while (cond(idx, (image, new_bboxes))):
+ idx, (image, new_bboxes) = body(idx, (image, new_bboxes))
+
+ # Either return the altered bboxes or the original ones depending on if
+ # we altered them in anyway.
+ if func_changes_bbox:
+ final_bboxes = new_bboxes
+ else:
+ final_bboxes = bboxes
+ return image, final_bboxes
+
+
+def _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, aug_func,
+ func_changes_bbox, *args):
+ """Checks to be sure num bboxes > 0 before calling inner function."""
+ num_bboxes = len(bboxes)
+ new_image = deepcopy(image)
+ new_bboxes = deepcopy(bboxes)
+ if num_bboxes != 0:
+ new_image, new_bboxes = _apply_multi_bbox_augmentation(
+ new_image, new_bboxes, prob, aug_func, func_changes_bbox, *args)
+ return new_image, new_bboxes
+
+
+def rotate_only_bboxes(image, bboxes, prob, degrees, replace):
+ """Apply rotate to each bbox in the image with probability prob."""
+ func_changes_bbox = False
+ prob = _scale_bbox_only_op_probability(prob)
+ return _apply_multi_bbox_augmentation_wrapper(
+ image, bboxes, prob, rotate, func_changes_bbox, degrees, replace)
+
+
+def shear_x_only_bboxes(image, bboxes, prob, level, replace):
+ """Apply shear_x to each bbox in the image with probability prob."""
+ func_changes_bbox = False
+ prob = _scale_bbox_only_op_probability(prob)
+ return _apply_multi_bbox_augmentation_wrapper(
+ image, bboxes, prob, shear_x, func_changes_bbox, level, replace)
+
+
+def shear_y_only_bboxes(image, bboxes, prob, level, replace):
+ """Apply shear_y to each bbox in the image with probability prob."""
+ func_changes_bbox = False
+ prob = _scale_bbox_only_op_probability(prob)
+ return _apply_multi_bbox_augmentation_wrapper(
+ image, bboxes, prob, shear_y, func_changes_bbox, level, replace)
+
+
+def translate_x_only_bboxes(image, bboxes, prob, pixels, replace):
+ """Apply translate_x to each bbox in the image with probability prob."""
+ func_changes_bbox = False
+ prob = _scale_bbox_only_op_probability(prob)
+ return _apply_multi_bbox_augmentation_wrapper(
+ image, bboxes, prob, translate_x, func_changes_bbox, pixels, replace)
+
+
+def translate_y_only_bboxes(image, bboxes, prob, pixels, replace):
+ """Apply translate_y to each bbox in the image with probability prob."""
+ func_changes_bbox = False
+ prob = _scale_bbox_only_op_probability(prob)
+ return _apply_multi_bbox_augmentation_wrapper(
+ image, bboxes, prob, translate_y, func_changes_bbox, pixels, replace)
+
+
+def flip_only_bboxes(image, bboxes, prob):
+ """Apply flip_lr to each bbox in the image with probability prob."""
+ func_changes_bbox = False
+ prob = _scale_bbox_only_op_probability(prob)
+ return _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob,
+ np.fliplr, func_changes_bbox)
+
+
+def solarize_only_bboxes(image, bboxes, prob, threshold):
+ """Apply solarize to each bbox in the image with probability prob."""
+ func_changes_bbox = False
+ prob = _scale_bbox_only_op_probability(prob)
+ return _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, solarize,
+ func_changes_bbox, threshold)
+
+
+def equalize_only_bboxes(image, bboxes, prob):
+ """Apply equalize to each bbox in the image with probability prob."""
+ func_changes_bbox = False
+ prob = _scale_bbox_only_op_probability(prob)
+ return _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, equalize,
+ func_changes_bbox)
+
+
+def cutout_only_bboxes(image, bboxes, prob, pad_size, replace):
+ """Apply cutout to each bbox in the image with probability prob."""
+ func_changes_bbox = False
+ prob = _scale_bbox_only_op_probability(prob)
+ return _apply_multi_bbox_augmentation_wrapper(
+ image, bboxes, prob, cutout, func_changes_bbox, pad_size, replace)
+
+
+def _rotate_bbox(bbox, image_height, image_width, degrees):
+ """Rotates the bbox coordinated by degrees.
+
+ Args:
+ bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
+ of type float that represents the normalized coordinates between 0 and 1.
+ image_height: Int, height of the image.
+    image_width: Int, width of the image.
+ degrees: Float, a scalar angle in degrees to rotate all images by. If
+ degrees is positive the image will be rotated clockwise otherwise it will
+ be rotated counterclockwise.
+
+ Returns:
+ A tensor of the same shape as bbox, but now with the rotated coordinates.
+ """
+ image_height, image_width = (float(image_height), float(image_width))
+
+ # Convert from degrees to radians.
+ degrees_to_radians = math.pi / 180.0
+ radians = degrees * degrees_to_radians
+
+ # Translate the bbox to the center of the image and turn the normalized 0-1
+ # coordinates to absolute pixel locations.
+ # Y coordinates are made negative as the y axis of images goes down with
+ # increasing pixel values, so we negate to make sure x axis and y axis points
+ # are in the traditionally positive direction.
+ min_y = -int(image_height * (bbox[0] - 0.5))
+ min_x = int(image_width * (bbox[1] - 0.5))
+ max_y = -int(image_height * (bbox[2] - 0.5))
+ max_x = int(image_width * (bbox[3] - 0.5))
+ coordinates = np.stack([[min_y, min_x], [min_y, max_x], [max_y, min_x],
+ [max_y, max_x]]).astype(np.float32)
+  # Rotate the coordinates according to the rotation matrix: clockwise if
+  # radians is positive, counterclockwise otherwise.
+ rotation_matrix = np.stack([[math.cos(radians), math.sin(radians)],
+ [-math.sin(radians), math.cos(radians)]])
+ new_coords = np.matmul(rotation_matrix,
+ np.transpose(coordinates)).astype(np.int32)
+
+ # Find min/max values and convert them back to normalized 0-1 floats.
+ min_y = -(float(np.max(new_coords[0, :])) / image_height - 0.5)
+ min_x = float(np.min(new_coords[1, :])) / image_width + 0.5
+ max_y = -(float(np.min(new_coords[0, :])) / image_height - 0.5)
+ max_x = float(np.max(new_coords[1, :])) / image_width + 0.5
+
+  # Clip the bboxes to ensure they fall between [0, 1].
+ min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
+ min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
+ return np.stack([min_y, min_x, max_y, max_x])
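+
+# Quick sanity check for _rotate_bbox (illustrative only): a zero-degree
+# rotation is the identity on the box, up to integer truncation.
+#
+#   box = np.array([0.25, 0.25, 0.75, 0.75], dtype=np.float32)
+#   _rotate_bbox(box, image_height=100, image_width=100, degrees=0.)
+#   # -> array([0.25, 0.25, 0.75, 0.75])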
+
+
+def rotate_with_bboxes(image, bboxes, degrees, replace):
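+  """Equivalent of PIL Rotate that also rotates the bbox coordinates."""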
+ # Rotate the image.
+ image = rotate(image, degrees, replace)
+
+ # Convert bbox coordinates to pixel values.
+ image_height, image_width = image.shape[:2]
+ # pylint:disable=g-long-lambda
+ wrapped_rotate_bbox = lambda bbox: _rotate_bbox(bbox, image_height, image_width, degrees)
+ # pylint:enable=g-long-lambda
+ new_bboxes = np.zeros_like(bboxes)
+ for idx in range(len(bboxes)):
+ new_bboxes[idx] = wrapped_rotate_bbox(bboxes[idx])
+ return image, new_bboxes
+
+
+def translate_x(image, pixels, replace):
+ """Equivalent of PIL Translate in X dimension."""
+ image = Image.fromarray(wrap(image))
+ image = image.transform(image.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0))
+ return unwrap(np.array(image), replace)
+
+
+def translate_y(image, pixels, replace):
+ """Equivalent of PIL Translate in Y dimension."""
+ image = Image.fromarray(wrap(image))
+ image = image.transform(image.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels))
+ return unwrap(np.array(image), replace)
+
+
+def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal):
+ """Shifts the bbox coordinates by pixels.
+
+ Args:
+ bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
+ of type float that represents the normalized coordinates between 0 and 1.
+ image_height: Int, height of the image.
+ image_width: Int, width of the image.
+ pixels: An int. How many pixels to shift the bbox.
+ shift_horizontal: Boolean. If true then shift in X dimension else shift in
+ Y dimension.
+
+ Returns:
+ A tensor of the same shape as bbox, but now with the shifted coordinates.
+ """
+ pixels = int(pixels)
+ # Convert bbox to integer pixel locations.
+ min_y = int(float(image_height) * bbox[0])
+ min_x = int(float(image_width) * bbox[1])
+ max_y = int(float(image_height) * bbox[2])
+ max_x = int(float(image_width) * bbox[3])
+
+ if shift_horizontal:
+ min_x = np.maximum(0, min_x - pixels)
+ max_x = np.minimum(image_width, max_x - pixels)
+ else:
+ min_y = np.maximum(0, min_y - pixels)
+ max_y = np.minimum(image_height, max_y - pixels)
+
+ # Convert bbox back to floats.
+ min_y = float(min_y) / float(image_height)
+ min_x = float(min_x) / float(image_width)
+ max_y = float(max_y) / float(image_height)
+ max_x = float(max_x) / float(image_width)
+
+  # Clip the bboxes to ensure they fall between [0, 1].
+ min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
+ min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
+ return np.stack([min_y, min_x, max_y, max_x])
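+
+# Illustrative check: a positive horizontal shift of 10 pixels in a 100x100
+# image moves the box left by 0.1 in normalized coordinates, matching the
+# direction of the PIL affine used by translate_x above.
+#
+#   _shift_bbox(np.array([0.2, 0.2, 0.6, 0.6]), 100, 100, 10, True)
+#   # -> array([0.2, 0.1, 0.6, 0.5])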
+
+
+def translate_bbox(image, bboxes, pixels, replace, shift_horizontal):
+ """Equivalent of PIL Translate in X/Y dimension that shifts image and bbox.
+
+ Args:
+ image: 3D uint8 Tensor.
+ bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
+ has 4 elements (min_y, min_x, max_y, max_x) of type float with values
+ between [0, 1].
+ pixels: An int. How many pixels to shift the image and bboxes
+ replace: A one or three value 1D tensor to fill empty pixels.
+ shift_horizontal: Boolean. If true then shift in X dimension else shift in
+ Y dimension.
+
+ Returns:
+ A tuple containing a 3D uint8 Tensor that will be the result of translating
+ image by pixels. The second element of the tuple is bboxes, where now
+ the coordinates will be shifted to reflect the shifted image.
+ """
+ if shift_horizontal:
+ image = translate_x(image, pixels, replace)
+ else:
+ image = translate_y(image, pixels, replace)
+
+ # Convert bbox coordinates to pixel values.
+ image_height, image_width = image.shape[0], image.shape[1]
+ # pylint:disable=g-long-lambda
+ wrapped_shift_bbox = lambda bbox: _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal)
+ # pylint:enable=g-long-lambda
+ new_bboxes = deepcopy(bboxes)
+ num_bboxes = len(bboxes)
+ for idx in range(num_bboxes):
+ new_bboxes[idx] = wrapped_shift_bbox(bboxes[idx])
+ return image.astype(np.uint8), new_bboxes
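+
+# Minimal usage sketch for translate_bbox (the inputs are made up):
+#
+#   img = np.zeros((100, 100, 3), dtype=np.uint8)
+#   boxes = np.array([[0.2, 0.2, 0.6, 0.6]], dtype=np.float32)
+#   img, boxes = translate_bbox(img, boxes, pixels=10,
+#                               replace=(128, 128, 128),
+#                               shift_horizontal=True)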
+
+
+def shear_x(image, level, replace):
+ """Equivalent of PIL Shearing in X dimension."""
+ # Shear parallel to x axis is a projective transform
+ # with a matrix form of:
+ # [1 level
+ # 0 1].
+ image = Image.fromarray(wrap(image))
+ image = image.transform(image.size, Image.AFFINE, (1, level, 0, 0, 1, 0))
+ return unwrap(np.array(image), replace)
+
+
+def shear_y(image, level, replace):
+ """Equivalent of PIL Shearing in Y dimension."""
+ # Shear parallel to y axis is a projective transform
+ # with a matrix form of:
+ # [1 0
+ # level 1].
+ image = Image.fromarray(wrap(image))
+ image = image.transform(image.size, Image.AFFINE, (1, 0, 0, level, 1, 0))
+ return unwrap(np.array(image), replace)
+
+
+def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal):
+ """Shifts the bbox according to how the image was sheared.
+
+ Args:
+ bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
+ of type float that represents the normalized coordinates between 0 and 1.
+ image_height: Int, height of the image.
+    image_width: Int, width of the image.
+ level: Float. How much to shear the image.
+ shear_horizontal: If true then shear in X dimension else shear in
+ the Y dimension.
+
+ Returns:
+ A tensor of the same shape as bbox, but now with the shifted coordinates.
+ """
+ image_height, image_width = (float(image_height), float(image_width))
+
+ # Change bbox coordinates to be pixels.
+ min_y = int(image_height * bbox[0])
+ min_x = int(image_width * bbox[1])
+ max_y = int(image_height * bbox[2])
+ max_x = int(image_width * bbox[3])
+ coordinates = np.stack(
+ [[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
+ coordinates = coordinates.astype(np.float32)
+
+ # Shear the coordinates according to the translation matrix.
+ if shear_horizontal:
+ translation_matrix = np.stack([[1, 0], [-level, 1]])
+ else:
+ translation_matrix = np.stack([[1, -level], [0, 1]])
+ translation_matrix = translation_matrix.astype(np.float32)
+ new_coords = np.matmul(translation_matrix,
+ np.transpose(coordinates)).astype(np.int32)
+
+ # Find min/max values and convert them back to floats.
+ min_y = float(np.min(new_coords[0, :])) / image_height
+ min_x = float(np.min(new_coords[1, :])) / image_width
+ max_y = float(np.max(new_coords[0, :])) / image_height
+ max_x = float(np.max(new_coords[1, :])) / image_width
+
+  # Clip the bboxes to ensure they fall between [0, 1].
+ min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
+ min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
+ return np.stack([min_y, min_x, max_y, max_x])
+
+
+def shear_with_bboxes(image, bboxes, level, replace, shear_horizontal):
+ """Applies Shear Transformation to the image and shifts the bboxes.
+
+ Args:
+ image: 3D uint8 Tensor.
+ bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
+ has 4 elements (min_y, min_x, max_y, max_x) of type float with values
+ between [0, 1].
+    level: Float. How much to shear the image. This value will be between
+      -0.3 and 0.3.
+ replace: A one or three value 1D tensor to fill empty pixels.
+ shear_horizontal: Boolean. If true then shear in X dimension else shear in
+ the Y dimension.
+
+ Returns:
+ A tuple containing a 3D uint8 Tensor that will be the result of shearing
+ image by level. The second element of the tuple is bboxes, where now
+ the coordinates will be shifted to reflect the sheared image.
+ """
+ if shear_horizontal:
+ image = shear_x(image, level, replace)
+ else:
+ image = shear_y(image, level, replace)
+
+ # Convert bbox coordinates to pixel values.
+ image_height, image_width = image.shape[:2]
+ # pylint:disable=g-long-lambda
+ wrapped_shear_bbox = lambda bbox: _shear_bbox(bbox, image_height, image_width, level, shear_horizontal)
+ # pylint:enable=g-long-lambda
+ new_bboxes = deepcopy(bboxes)
+ num_bboxes = len(bboxes)
+ for idx in range(num_bboxes):
+ new_bboxes[idx] = wrapped_shear_bbox(bboxes[idx])
+ return image.astype(np.uint8), new_bboxes
+
+
+def autocontrast(image):
+ """Implements Autocontrast function from PIL.
+
+ Args:
+ image: A 3D uint8 tensor.
+
+ Returns:
+ The image after it has had autocontrast applied to it and will be of type
+ uint8.
+ """
+
+ def scale_channel(image):
+ """Scale the 2D image using the autocontrast rule."""
+    # A possibly cheaper version could use cumsum/unique_with_counts over
+    # the histogram values, rather than iterating over the entire image, to
+    # compute the mins and maxes.
+ lo = float(np.min(image))
+ hi = float(np.max(image))
+
+ # Scale the image, making the lowest value 0 and the highest value 255.
+ def scale_values(im):
+ scale = 255.0 / (hi - lo)
+ offset = -lo * scale
+      im = im.astype(np.float32) * scale + offset
+      # Clip to [0, 255] before casting back to uint8.
+      im = np.clip(im, a_min=0, a_max=255.0)
+      return im.astype(np.uint8)
+
+ result = scale_values(image) if hi > lo else image
+ return result
+
+ # Assumes RGB for now. Scales each channel independently
+ # and then stacks the result.
+ s1 = scale_channel(image[:, :, 0])
+ s2 = scale_channel(image[:, :, 1])
+ s3 = scale_channel(image[:, :, 2])
+ image = np.stack([s1, s2, s3], 2)
+ return image
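+
+# Illustrative effect of autocontrast: a low-contrast ramp in [100, 160) is
+# stretched to span the full [0, 255] range per channel.
+#
+#   ramp = np.tile(np.arange(100, 160, dtype=np.uint8), (60, 1))
+#   out = autocontrast(np.stack([ramp] * 3, axis=2))
+#   # out.min() == 0, out.max() == 255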
+
+
+def sharpness(image, factor):
+ """Implements Sharpness function from PIL."""
+ orig_image = image
+ image = image.astype(np.float32)
+  # Smooth the image with the 3x3 PIL SMOOTH kernel via a 2D convolution.
+ kernel = np.array([[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=np.float32) / 13.
+ result = cv2.filter2D(image, -1, kernel).astype(np.uint8)
+
+ # Blend the final result.
+ return blend(result, orig_image, factor)
+
+
+def equalize(image):
+ """Implements Equalize function from PIL using."""
+
+ def scale_channel(im, c):
+ """Scale the data in the channel to implement equalize."""
+ im = im[:, :, c].astype(np.int32)
+ # Compute the histogram of the image channel.
+ histo, _ = np.histogram(im, range=[0, 255], bins=256)
+
+ # For the purposes of computing the step, filter out the nonzeros.
+ nonzero = np.where(np.not_equal(histo, 0))
+ nonzero_histo = np.reshape(np.take(histo, nonzero), [-1])
+ step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255
+
+ def build_lut(histo, step):
+      # Compute the cumulative sum, shifting by step // 2 and then
+      # normalizing by step.
+ lut = (np.cumsum(histo) + (step // 2)) // step
+ # Shift lut, prepending with 0.
+ lut = np.concatenate([[0], lut[:-1]], 0)
+ # Clip the counts to be in range. This is done
+ # in the C code for image.point.
+ return np.clip(lut, a_min=0, a_max=255).astype(np.uint8)
+
+ # If step is zero, return the original image. Otherwise, build
+ # lut from the full histogram and step and then index from it.
+ if step == 0:
+ result = im
+ else:
+ result = np.take(build_lut(histo, step), im)
+
+ return result.astype(np.uint8)
+
+ # Assumes RGB for now. Scales each channel independently
+ # and then stacks the result.
+ s1 = scale_channel(image, 0)
+ s2 = scale_channel(image, 1)
+ s3 = scale_channel(image, 2)
+ image = np.stack([s1, s2, s3], 2)
+ return image
+
+
+def wrap(image):
+ """Returns 'image' with an extra channel set to all 1s."""
+ shape = image.shape
+ extended_channel = 255 * np.ones([shape[0], shape[1], 1], image.dtype)
+ extended = np.concatenate([image, extended_channel], 2).astype(image.dtype)
+ return extended
+
+
+def unwrap(image, replace):
+ """Unwraps an image produced by wrap.
+
+  Where there is a 0 in the last channel for every spatial position,
+  the remaining three channels at that position are filled with the
+  `replace` value. Operations like translate and shear on a wrapped
+ Tensor will leave 0s in empty locations. Some transformations look
+ at the intensity of values to do preprocessing, and we want these
+ empty pixels to assume the 'average' value, rather than pure black.
+
+
+ Args:
+ image: A 3D Image Tensor with 4 channels.
+ replace: A one or three value 1D tensor to fill empty pixels.
+
+ Returns:
+ image: A 3D image Tensor with 3 channels.
+ """
+ image_shape = image.shape
+ # Flatten the spatial dimensions.
+ flattened_image = np.reshape(image, [-1, image_shape[2]])
+
+ # Find all pixels where the last channel is zero.
+ alpha_channel = flattened_image[:, 3]
+
+ replace = np.concatenate([replace, np.ones([1], image.dtype)], 0)
+
+ # Where they are zero, fill them in with 'replace'.
+ alpha_channel = np.reshape(alpha_channel, (-1, 1))
+ alpha_channel = np.tile(alpha_channel, reps=(1, flattened_image.shape[1]))
+
+ flattened_image = np.where(
+ np.equal(alpha_channel, 0),
+ np.ones_like(
+ flattened_image, dtype=image.dtype) * replace,
+ flattened_image)
+
+ image = np.reshape(flattened_image, image_shape)
+ image = image[:, :, :3]
+ return image.astype(np.uint8)
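+
+# The wrap/unwrap pair acts like a synthetic alpha channel for geometric ops:
+# wrap appends an all-255 channel, the PIL affine fills vacated pixels with 0
+# in every channel, and unwrap detects alpha == 0 and fills those pixels with
+# `replace`. Illustrative round trip (made-up inputs):
+#
+#   img = np.full((8, 8, 3), 200, dtype=np.uint8)
+#   shifted = translate_x(img, pixels=4, replace=(128, 128, 128))
+#   # content moved 4 columns left; the vacated right columns now hold 128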
+
+
+def _cutout_inside_bbox(image, bbox, pad_fraction):
+ """Generates cutout mask and the mean pixel value of the bbox.
+
+ First a location is randomly chosen within the image as the center where the
+ cutout mask will be applied. Note this can be towards the boundaries of the
+ image, so the full cutout mask may not be applied.
+
+ Args:
+ image: 3D uint8 Tensor.
+ bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
+ of type float that represents the normalized coordinates between 0 and 1.
+    pad_fraction: Float that specifies how large the cutout mask should be
+      in reference to the size of the original bbox. If pad_fraction is
+      0.25, then the cutout mask will be of shape
+      (0.25 * bbox height, 0.25 * bbox width).
+
+  Returns:
+    A tuple. The first element is a tensor of the same shape as image, where
+    each element is either a 1 or a 0 that determines where cutout will be
+    applied. The second element is the mean of the pixels in the image where
+    the bbox is located.
+  """
+ image_height, image_width = image.shape[0], image.shape[1]
+ # Transform from shape [1, 4] to [4].
+ bbox = np.squeeze(bbox)
+
+ min_y = int(float(image_height) * bbox[0])
+ min_x = int(float(image_width) * bbox[1])
+ max_y = int(float(image_height) * bbox[2])
+ max_x = int(float(image_width) * bbox[3])
+
+ # Calculate the mean pixel values in the bounding box, which will be used
+ # to fill the cutout region.
+ mean = np.mean(image[min_y:max_y + 1, min_x:max_x + 1], axis=(0, 1))
+  # Cutout mask will be size pad_size_height * 2 by pad_size_width * 2 if the
+ # region lies entirely within the bbox.
+ box_height = max_y - min_y + 1
+ box_width = max_x - min_x + 1
+ pad_size_height = int(pad_fraction * (box_height / 2))
+ pad_size_width = int(pad_fraction * (box_width / 2))
+
+ # Sample the center location in the image where the zero mask will be applied.
+ cutout_center_height = np.random.randint(min_y, max_y + 1, dtype=np.int32)
+ cutout_center_width = np.random.randint(min_x, max_x + 1, dtype=np.int32)
+
+ lower_pad = np.maximum(0, cutout_center_height - pad_size_height)
+ upper_pad = np.maximum(
+ 0, image_height - cutout_center_height - pad_size_height)
+ left_pad = np.maximum(0, cutout_center_width - pad_size_width)
+ right_pad = np.maximum(0,
+ image_width - cutout_center_width - pad_size_width)
+
+ cutout_shape = [
+ image_height - (lower_pad + upper_pad),
+ image_width - (left_pad + right_pad)
+ ]
+ padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
+
+ mask = np.pad(np.zeros(
+ cutout_shape, dtype=image.dtype),
+ padding_dims,
+ 'constant',
+ constant_values=1)
+
+ mask = np.expand_dims(mask, 2)
+ mask = np.tile(mask, [1, 1, 3])
+ return mask, mean
+
+
+def bbox_cutout(image, bboxes, pad_fraction, replace_with_mean):
+ """Applies cutout to the image according to bbox information.
+
+  This is a cutout variant that uses bbox information to make more informed
+ decisions on where to place the cutout mask.
+
+ Args:
+ image: 3D uint8 Tensor.
+ bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
+ has 4 elements (min_y, min_x, max_y, max_x) of type float with values
+ between [0, 1].
+    pad_fraction: Float that specifies how large the cutout mask should be
+      in reference to the size of the original bbox. If pad_fraction is
+      0.25, then the cutout mask will be of shape
+      (0.25 * bbox height, 0.25 * bbox width).
+    replace_with_mean: Boolean that specifies what value should be filled in
+ where the cutout mask is applied. Since the incoming image will be of
+ uint8 and will not have had any mean normalization applied, by default
+ we set the value to be 128. If replace_with_mean is True then we find
+ the mean pixel values across the channel dimension and use those to fill
+ in where the cutout mask is applied.
+
+ Returns:
+ A tuple. First element is a tensor of the same shape as image that has
+ cutout applied to it. Second element is the bboxes that were passed in
+ that will be unchanged.
+ """
+
+ def apply_bbox_cutout(image, bboxes, pad_fraction):
+ """Applies cutout to a single bounding box within image."""
+ # Choose a single bounding box to apply cutout to.
+ random_index = np.random.randint(0, bboxes.shape[0], dtype=np.int32)
+ # Select the corresponding bbox and apply cutout.
+ chosen_bbox = np.take(bboxes, random_index, axis=0)
+ mask, mean = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
+
+ # When applying cutout we either set the pixel value to 128 or to the mean
+ # value inside the bbox.
+ replace = mean if replace_with_mean else [128] * 3
+
+ # Apply the cutout mask to the image. Where the mask is 0 we fill it with
+ # `replace`.
+ image = np.where(
+ np.equal(mask, 0),
+ np.ones_like(
+ image, dtype=image.dtype) * replace,
+ image).astype(image.dtype)
+ return image
+
+  # Check to see if there are boxes; if so, apply bbox cutout.
+ if len(bboxes) != 0:
+ image = apply_bbox_cutout(image, bboxes, pad_fraction)
+
+ return image, bboxes
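+
+# Illustrative usage of bbox_cutout (made-up inputs):
+#
+#   img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
+#   boxes = np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32)
+#   img, boxes = bbox_cutout(img, boxes, pad_fraction=0.25,
+#                            replace_with_mean=False)
+#   # a rectangle centered inside the chosen bbox is now filled with 128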
+
+
+NAME_TO_FUNC = {
+ 'AutoContrast': autocontrast,
+ 'Equalize': equalize,
+ 'Posterize': posterize,
+ 'Solarize': solarize,
+ 'SolarizeAdd': solarize_add,
+ 'Color': color,
+ 'Contrast': contrast,
+ 'Brightness': brightness,
+ 'Sharpness': sharpness,
+ 'Cutout': cutout,
+ 'BBox_Cutout': bbox_cutout,
+ 'Rotate_BBox': rotate_with_bboxes,
+ # pylint:disable=g-long-lambda
+ 'TranslateX_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
+ image, bboxes, pixels, replace, shift_horizontal=True),
+ 'TranslateY_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
+ image, bboxes, pixels, replace, shift_horizontal=False),
+ 'ShearX_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
+ image, bboxes, level, replace, shear_horizontal=True),
+ 'ShearY_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
+ image, bboxes, level, replace, shear_horizontal=False),
+ # pylint:enable=g-long-lambda
+ 'Rotate_Only_BBoxes': rotate_only_bboxes,
+ 'ShearX_Only_BBoxes': shear_x_only_bboxes,
+ 'ShearY_Only_BBoxes': shear_y_only_bboxes,
+ 'TranslateX_Only_BBoxes': translate_x_only_bboxes,
+ 'TranslateY_Only_BBoxes': translate_y_only_bboxes,
+ 'Flip_Only_BBoxes': flip_only_bboxes,
+ 'Solarize_Only_BBoxes': solarize_only_bboxes,
+ 'Equalize_Only_BBoxes': equalize_only_bboxes,
+ 'Cutout_Only_BBoxes': cutout_only_bboxes,
+}
+
+
+def _randomly_negate_tensor(tensor):
+ """With 50% prob turn the tensor negative."""
+ should_flip = np.floor(np.random.rand() + 0.5) >= 1
+ final_tensor = tensor if should_flip else -tensor
+ return final_tensor
+
+
+def _rotate_level_to_arg(level):
+ level = (level / _MAX_LEVEL) * 30.
+ level = _randomly_negate_tensor(level)
+ return (level, )
+
+
+def _shrink_level_to_arg(level):
+ """Converts level to ratio by which we shrink the image content."""
+ if level == 0:
+ return (1.0, ) # if level is zero, do not shrink the image
+ # Maximum shrinking ratio is 2.9.
+ level = 2. / (_MAX_LEVEL / level) + 0.9
+ return (level, )
+
+
+def _enhance_level_to_arg(level):
+ return ((level / _MAX_LEVEL) * 1.8 + 0.1, )
+
+
+def _shear_level_to_arg(level):
+ level = (level / _MAX_LEVEL) * 0.3
+ # Flip level to negative with 50% chance.
+ level = _randomly_negate_tensor(level)
+ return (level, )
+
+
+def _translate_level_to_arg(level, translate_const):
+ level = (level / _MAX_LEVEL) * float(translate_const)
+ # Flip level to negative with 50% chance.
+ level = _randomly_negate_tensor(level)
+ return (level, )
+
+
+def _bbox_cutout_level_to_arg(level, hparams):
+ cutout_pad_fraction = (level /
+ _MAX_LEVEL) * 0.75 # hparams.cutout_max_pad_fraction
+ return (cutout_pad_fraction, False) # hparams.cutout_bbox_replace_with_mean
+
+
+def level_to_arg(hparams):
+ return {
+ 'AutoContrast': lambda level: (),
+ 'Equalize': lambda level: (),
+ 'Posterize': lambda level: (int((level / _MAX_LEVEL) * 4), ),
+ 'Solarize': lambda level: (int((level / _MAX_LEVEL) * 256), ),
+ 'SolarizeAdd': lambda level: (int((level / _MAX_LEVEL) * 110), ),
+ 'Color': _enhance_level_to_arg,
+ 'Contrast': _enhance_level_to_arg,
+ 'Brightness': _enhance_level_to_arg,
+ 'Sharpness': _enhance_level_to_arg,
+ 'Cutout':
+ lambda level: (int((level / _MAX_LEVEL) * 100), ), # hparams.cutout_const=100
+ # pylint:disable=g-long-lambda
+ 'BBox_Cutout': lambda level: _bbox_cutout_level_to_arg(level, hparams),
+ 'TranslateX_BBox':
+ lambda level: _translate_level_to_arg(level, 250), # hparams.translate_const=250
+ 'TranslateY_BBox':
+ lambda level: _translate_level_to_arg(level, 250), # hparams.translate_cons
+ # pylint:enable=g-long-lambda
+ 'ShearX_BBox': _shear_level_to_arg,
+ 'ShearY_BBox': _shear_level_to_arg,
+ 'Rotate_BBox': _rotate_level_to_arg,
+ 'Rotate_Only_BBoxes': _rotate_level_to_arg,
+ 'ShearX_Only_BBoxes': _shear_level_to_arg,
+ 'ShearY_Only_BBoxes': _shear_level_to_arg,
+ # pylint:disable=g-long-lambda
+ 'TranslateX_Only_BBoxes':
+ lambda level: _translate_level_to_arg(level, 120), # hparams.translate_bbox_const
+ 'TranslateY_Only_BBoxes':
+ lambda level: _translate_level_to_arg(level, 120), # hparams.translate_bbox_const
+ # pylint:enable=g-long-lambda
+ 'Flip_Only_BBoxes': lambda level: (),
+ 'Solarize_Only_BBoxes':
+ lambda level: (int((level / _MAX_LEVEL) * 256), ),
+ 'Equalize_Only_BBoxes': lambda level: (),
+ # pylint:disable=g-long-lambda
+ 'Cutout_Only_BBoxes':
+ lambda level: (int((level / _MAX_LEVEL) * 50), ), # hparams.cutout_bbox_const
+ # pylint:enable=g-long-lambda
+ }
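+
+# Illustrative magnitudes (assuming _MAX_LEVEL, defined earlier in this file,
+# is the policy's maximum level): at level == _MAX_LEVEL, 'Rotate_BBox' maps
+# to +/-30 degrees, 'ShearX_BBox' to +/-0.3, 'TranslateX_BBox' to +/-250
+# pixels and 'Cutout' to a pad size of 100, per the helpers above.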
+
+
+def bbox_wrapper(func):
+ """Adds a bboxes function argument to func and returns unchanged bboxes."""
+
+ def wrapper(images, bboxes, *args, **kwargs):
+ return (func(images, *args, **kwargs), bboxes)
+
+ return wrapper
+
+
+def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
+ """Return the function that corresponds to `name` and update `level` param."""
+ func = NAME_TO_FUNC[name]
+ args = level_to_arg(augmentation_hparams)[name](level)
+
+ # Check to see if prob is passed into function. This is used for operations
+ # where we alter bboxes independently.
+ # pytype:disable=wrong-arg-types
+ if 'prob' in inspect.getfullargspec(func)[0]:
+ args = tuple([prob] + list(args))
+ # pytype:enable=wrong-arg-types
+
+ # Add in replace arg if it is required for the function that is being called.
+ if 'replace' in inspect.getfullargspec(func)[0]:
+ # Make sure replace is the final argument
+ assert 'replace' == inspect.getfullargspec(func)[0][-1]
+ args = tuple(list(args) + [replace_value])
+
+ # Add bboxes as the second positional argument for the function if it does
+ # not already exist.
+ if 'bboxes' not in inspect.getfullargspec(func)[0]:
+ func = bbox_wrapper(func)
+ return (func, prob, args)
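+
+# Illustrative walk-through of the dispatch chain (a made-up policy entry):
+# ('Rotate_BBox', 0.6, 5) resolves NAME_TO_FUNC['Rotate_BBox'], converts the
+# level to a (randomly negated) degrees argument via _rotate_level_to_arg,
+# and appends replace_value since rotate_with_bboxes takes `replace` last:
+#
+#   func, prob, args = _parse_policy_info('Rotate_BBox', 0.6, 5,
+#                                         [128, 128, 128], {})
+#   # func is rotate_with_bboxes, prob == 0.6, args == (degrees, replace)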
+
+
+def _apply_func_with_prob(func, image, args, prob, bboxes):
+ """Apply `func` to image w/ `args` as input with probability `prob`."""
+ assert isinstance(args, tuple)
+ assert 'bboxes' == inspect.getfullargspec(func)[0][1]
+
+ # If prob is a function argument, then this randomness is being handled
+ # inside the function, so make sure it is always called.
+ if 'prob' in inspect.getfullargspec(func)[0]:
+ prob = 1.0
+
+ # Apply the function with probability `prob`.
+ should_apply_op = np.floor(np.random.rand() + 0.5) >= 1
+ if should_apply_op:
+ augmented_image, augmented_bboxes = func(image, bboxes, *args)
+ else:
+ augmented_image, augmented_bboxes = (image, bboxes)
+ return augmented_image, augmented_bboxes
+
+
+def select_and_apply_random_policy(policies, image, bboxes):
+ """Select a random policy from `policies` and apply it to `image`."""
+ policy_to_select = np.random.randint(0, len(policies), dtype=np.int32)
+ for (i, policy) in enumerate(policies):
+ if i == policy_to_select:
+ image, bboxes = policy(image, bboxes)
+ return (image, bboxes)
+
+
+def build_and_apply_nas_policy(policies, image, bboxes, augmentation_hparams):
+ """Build a policy from the given policies passed in and apply to image.
+
+ Args:
+ policies: list of lists of tuples in the form `(func, prob, level)`, `func`
+ is a string name of the augmentation function, `prob` is the probability
+ of applying the `func` operation, `level` is the input argument for
+ `func`.
+ image: numpy array that the resulting policy will be applied to.
+    bboxes: 2D numpy array of normalized bboxes in the image, transformed
+      alongside it.
+ augmentation_hparams: Hparams associated with the NAS learned policy.
+
+ Returns:
+    A version of image that now has data augmentation applied to it based on
+    the `policies` passed into the function. Additionally, returns bboxes if
+    a non-None value for them is passed in.
+ """
+ replace_value = [128, 128, 128]
+
+  # func is the string name of the augmentation function, prob is the
+  # probability of applying the operation, and level is the parameter
+  # associated with the function.
+
+ # tf_policies are functions that take in an image and return an augmented
+ # image.
+ tf_policies = []
+ for policy in policies:
+ tf_policy = []
+ # Link string name to the correct python function and make sure the correct
+ # argument is passed into that function.
+ for policy_info in policy:
+ policy_info = list(
+ policy_info) + [replace_value, augmentation_hparams]
+
+ tf_policy.append(_parse_policy_info(*policy_info))
+    # Now build the tf policy that will apply the augmentation procedure
+ # on image.
+ def make_final_policy(tf_policy_):
+ def final_policy(image_, bboxes_):
+ for func, prob, args in tf_policy_:
+ image_, bboxes_ = _apply_func_with_prob(func, image_, args,
+ prob, bboxes_)
+ return image_, bboxes_
+
+ return final_policy
+
+ tf_policies.append(make_final_policy(tf_policy))
+
+ augmented_images, augmented_bboxes = select_and_apply_random_policy(
+ tf_policies, image, bboxes)
+  # Return the augmented image together with the transformed bboxes.
+ return (augmented_images, augmented_bboxes)
+
+
+# TODO(barretzoph): Add in ArXiv link once paper is out.
+def distort_image_with_autoaugment(image, bboxes, augmentation_name):
+ """Applies the AutoAugment policy to `image` and `bboxes`.
+
+ Args:
+ image: `Tensor` of shape [height, width, 3] representing an image.
+ bboxes: `Tensor` of shape [N, 4] representing ground truth boxes that are
+ normalized between [0, 1].
+ augmentation_name: The name of the AutoAugment policy to use. The available
+ options are `v0`, `v1`, `v2`, `v3` and `test`. `v0` is the policy used for
+ all of the results in the paper and was found to achieve the best results
+ on the COCO dataset. `v1`, `v2` and `v3` are additional good policies
+ found on the COCO dataset that have slight variation in what operations
+ were used during the search procedure along with how many operations are
+ applied in parallel to a single image (2 vs 3).
+
+ Returns:
+ A tuple containing the augmented versions of `image` and `bboxes`.
+ """
+ available_policies = {
+ 'v0': policy_v0,
+ 'v1': policy_v1,
+ 'v2': policy_v2,
+ 'v3': policy_v3,
+ 'test': policy_vtest
+ }
+ if augmentation_name not in available_policies:
+ raise ValueError('Invalid augmentation_name: {}'.format(
+ augmentation_name))
+
+ policy = available_policies[augmentation_name]()
+ augmentation_hparams = {}
+ return build_and_apply_nas_policy(policy, image, bboxes,
+ augmentation_hparams)
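+
+# Illustrative end-to-end call (a sketch; policy_v0 etc. are defined earlier
+# in this file and the inputs below are made up):
+#
+#   img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
+#   boxes = np.array([[0.1, 0.1, 0.4, 0.4]], dtype=np.float32)
+#   img, boxes = distort_image_with_autoaugment(img, boxes, 'v0')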
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/batch_operators.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/batch_operators.py
new file mode 100644
index 000000000..e43fb7d20
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/batch_operators.py
@@ -0,0 +1,1060 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+try:
+ from collections.abc import Sequence
+except Exception:
+ from collections import Sequence
+
+import cv2
+import math
+import numpy as np
+from .operators import register_op, BaseOperator, Resize
+from .op_helper import jaccard_overlap, gaussian2D, gaussian_radius, draw_umich_gaussian
+from .atss_assigner import ATSSAssigner
+from scipy import ndimage
+
+from ppdet.modeling import bbox_utils
+from ppdet.utils.logger import setup_logger
+from ppdet.modeling.keypoint_utils import get_affine_transform, affine_transform
+logger = setup_logger(__name__)
+
+__all__ = [
+ 'PadBatch',
+ 'BatchRandomResize',
+ 'Gt2YoloTarget',
+ 'Gt2FCOSTarget',
+ 'Gt2TTFTarget',
+ 'Gt2Solov2Target',
+ 'Gt2SparseRCNNTarget',
+ 'PadMaskBatch',
+ 'Gt2GFLTarget',
+ 'Gt2CenterNetTarget',
+]
+
+
+@register_op
+class PadBatch(BaseOperator):
+ """
+    Pad a batch of samples so that their heights and widths are divisible by a stride.
+ The layout of each image should be 'CHW'.
+ Args:
+ pad_to_stride (int): If `pad_to_stride > 0`, pad zeros to ensure
+ height and width is divisible by `pad_to_stride`.
+ """
+
+ def __init__(self, pad_to_stride=0):
+ super(PadBatch, self).__init__()
+ self.pad_to_stride = pad_to_stride
+
+ def __call__(self, samples, context=None):
+ """
+ Args:
+ samples (list): a batch of sample, each is dict.
+ """
+ coarsest_stride = self.pad_to_stride
+
+ max_shape = np.array([data['image'].shape for data in samples]).max(
+ axis=0)
+ if coarsest_stride > 0:
+ max_shape[1] = int(
+ np.ceil(max_shape[1] / coarsest_stride) * coarsest_stride)
+ max_shape[2] = int(
+ np.ceil(max_shape[2] / coarsest_stride) * coarsest_stride)
+
+ for data in samples:
+ im = data['image']
+ im_c, im_h, im_w = im.shape[:]
+ padding_im = np.zeros(
+ (im_c, max_shape[1], max_shape[2]), dtype=np.float32)
+ padding_im[:, :im_h, :im_w] = im
+ data['image'] = padding_im
+ if 'semantic' in data and data['semantic'] is not None:
+ semantic = data['semantic']
+ padding_sem = np.zeros(
+ (1, max_shape[1], max_shape[2]), dtype=np.float32)
+ padding_sem[:, :im_h, :im_w] = semantic
+ data['semantic'] = padding_sem
+ if 'gt_segm' in data and data['gt_segm'] is not None:
+ gt_segm = data['gt_segm']
+ padding_segm = np.zeros(
+ (gt_segm.shape[0], max_shape[1], max_shape[2]),
+ dtype=np.uint8)
+ padding_segm[:, :im_h, :im_w] = gt_segm
+ data['gt_segm'] = padding_segm
+
+ if 'gt_rbox2poly' in data and data['gt_rbox2poly'] is not None:
+                # poly to rbox
+ polys = data['gt_rbox2poly']
+ rbox = bbox_utils.poly2rbox(polys)
+ data['gt_rbox'] = rbox
+
+ return samples
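+
+# Illustrative shape check (not part of the original operator): with
+# pad_to_stride=32, CHW images of shapes (3, 100, 90) and (3, 120, 128) are
+# both zero-padded to the smallest stride multiple covering the batch
+# maximum, here (3, 128, 128):
+#
+#   batch = [{'image': np.zeros((3, 100, 90), np.float32)},
+#            {'image': np.zeros((3, 120, 128), np.float32)}]
+#   batch = PadBatch(pad_to_stride=32)(batch)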
+
+
+@register_op
+class BatchRandomResize(BaseOperator):
+ """
+    Resize a batch of images to a target size, optionally randomizing the target size and interpolation method.
+    Args:
+        target_size (int, list, tuple): image target size; if random_size is True, must be a list or tuple
+        keep_ratio (bool): whether to keep the aspect ratio, default True
+        interp (int): the interpolation method
+        random_size (bool): whether to randomly select the target size of the image
+        random_interp (bool): whether to randomly select the interpolation method
+ """
+
+ def __init__(self,
+ target_size,
+ keep_ratio,
+ interp=cv2.INTER_NEAREST,
+ random_size=True,
+ random_interp=False):
+ super(BatchRandomResize, self).__init__()
+ self.keep_ratio = keep_ratio
+ self.interps = [
+ cv2.INTER_NEAREST,
+ cv2.INTER_LINEAR,
+ cv2.INTER_AREA,
+ cv2.INTER_CUBIC,
+ cv2.INTER_LANCZOS4,
+ ]
+ self.interp = interp
+ assert isinstance(target_size, (
+ int, Sequence)), "target_size must be int, list or tuple"
+ if random_size and not isinstance(target_size, list):
+ raise TypeError(
+ "Type of target_size is invalid when random_size is True. Must be List, now is {}".
+ format(type(target_size)))
+ self.target_size = target_size
+ self.random_size = random_size
+ self.random_interp = random_interp
+
+ def __call__(self, samples, context=None):
+ if self.random_size:
+ index = np.random.choice(len(self.target_size))
+ target_size = self.target_size[index]
+ else:
+ target_size = self.target_size
+
+ if self.random_interp:
+ interp = np.random.choice(self.interps)
+ else:
+ interp = self.interp
+
+ resizer = Resize(target_size, keep_ratio=self.keep_ratio, interp=interp)
+ return resizer(samples, context=context)
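+
+# Illustrative usage (a sketch; assumes `samples` follow the dataset dict
+# format consumed by Resize, with at least an 'image' key):
+#
+#   op = BatchRandomResize(target_size=[320, 416, 512], keep_ratio=False,
+#                          random_size=True, random_interp=True)
+#   samples = op(samples)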
+
+
+@register_op
+class Gt2YoloTarget(BaseOperator):
+ """
+    Generate YOLOv3 targets from ground truth data; this operator is only
+    used in the fine-grained YOLOv3 loss mode.
+ """
+
+ def __init__(self,
+ anchors,
+ anchor_masks,
+ downsample_ratios,
+ num_classes=80,
+ iou_thresh=1.):
+ super(Gt2YoloTarget, self).__init__()
+ self.anchors = anchors
+ self.anchor_masks = anchor_masks
+ self.downsample_ratios = downsample_ratios
+ self.num_classes = num_classes
+ self.iou_thresh = iou_thresh
+
+ def __call__(self, samples, context=None):
+        assert len(self.anchor_masks) == len(self.downsample_ratios), \
+            "'anchor_masks' and 'downsample_ratios' should have the same length."
+
+ h, w = samples[0]['image'].shape[1:3]
+ an_hw = np.array(self.anchors) / np.array([[w, h]])
+ for sample in samples:
+ gt_bbox = sample['gt_bbox']
+ gt_class = sample['gt_class']
+ if 'gt_score' not in sample:
+ sample['gt_score'] = np.ones(
+ (gt_bbox.shape[0], 1), dtype=np.float32)
+ gt_score = sample['gt_score']
+ for i, (
+ mask, downsample_ratio
+ ) in enumerate(zip(self.anchor_masks, self.downsample_ratios)):
+ grid_h = int(h / downsample_ratio)
+ grid_w = int(w / downsample_ratio)
+ target = np.zeros(
+ (len(mask), 6 + self.num_classes, grid_h, grid_w),
+ dtype=np.float32)
+ for b in range(gt_bbox.shape[0]):
+ gx, gy, gw, gh = gt_bbox[b, :]
+ cls = gt_class[b]
+ score = gt_score[b]
+ if gw <= 0. or gh <= 0. or score <= 0.:
+ continue
+
+ # find best match anchor index
+ best_iou = 0.
+ best_idx = -1
+ for an_idx in range(an_hw.shape[0]):
+ iou = jaccard_overlap(
+ [0., 0., gw, gh],
+ [0., 0., an_hw[an_idx, 0], an_hw[an_idx, 1]])
+ if iou > best_iou:
+ best_iou = iou
+ best_idx = an_idx
+
+ gi = int(gx * grid_w)
+ gj = int(gy * grid_h)
+
+                    # The gt box should be regressed in this layer if the
+                    # best-matching anchor index is in this layer's anchor mask.
+ if best_idx in mask:
+ best_n = mask.index(best_idx)
+
+ # x, y, w, h, scale
+ target[best_n, 0, gj, gi] = gx * grid_w - gi
+ target[best_n, 1, gj, gi] = gy * grid_h - gj
+ target[best_n, 2, gj, gi] = np.log(
+ gw * w / self.anchors[best_idx][0])
+ target[best_n, 3, gj, gi] = np.log(
+ gh * h / self.anchors[best_idx][1])
+ target[best_n, 4, gj, gi] = 2.0 - gw * gh
+
+ # objectness record gt_score
+ target[best_n, 5, gj, gi] = score
+
+ # classification
+ target[best_n, 6 + cls, gj, gi] = 1.
+
+ # For non-matched anchors, calculate the target if the iou
+ # between anchor and gt is larger than iou_thresh
+ if self.iou_thresh < 1:
+ for idx, mask_i in enumerate(mask):
+ if mask_i == best_idx: continue
+ iou = jaccard_overlap(
+ [0., 0., gw, gh],
+ [0., 0., an_hw[mask_i, 0], an_hw[mask_i, 1]])
+ if iou > self.iou_thresh and target[idx, 5, gj,
+ gi] == 0.:
+ # x, y, w, h, scale
+ target[idx, 0, gj, gi] = gx * grid_w - gi
+ target[idx, 1, gj, gi] = gy * grid_h - gj
+ target[idx, 2, gj, gi] = np.log(
+ gw * w / self.anchors[mask_i][0])
+ target[idx, 3, gj, gi] = np.log(
+ gh * h / self.anchors[mask_i][1])
+ target[idx, 4, gj, gi] = 2.0 - gw * gh
+
+ # objectness record gt_score
+ target[idx, 5, gj, gi] = score
+
+ # classification
+ target[idx, 6 + cls, gj, gi] = 1.
+ sample['target{}'.format(i)] = target
+
+ # remove useless gt_class and gt_score after target calculated
+ sample.pop('gt_class')
+ sample.pop('gt_score')
+
+ return samples
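+
+# Layout note (derived from the code above): for each output level i,
+# sample['target{i}'] has shape [len(mask), 6 + num_classes, grid_h, grid_w]
+# with channels 0-3 holding (tx, ty, tw, th), channel 4 the 2 - w*h scale
+# weight, channel 5 the objectness (gt_score) and channel 6+cls the one-hot
+# class target.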
+
+
+@register_op
+class Gt2FCOSTarget(BaseOperator):
+ """
+    Generate FCOS targets from ground truth data
+ """
+
+ def __init__(self,
+ object_sizes_boundary,
+ center_sampling_radius,
+ downsample_ratios,
+ norm_reg_targets=False):
+ super(Gt2FCOSTarget, self).__init__()
+ self.center_sampling_radius = center_sampling_radius
+ self.downsample_ratios = downsample_ratios
+ self.INF = np.inf
+ self.object_sizes_boundary = [-1] + object_sizes_boundary + [self.INF]
+ object_sizes_of_interest = []
+ for i in range(len(self.object_sizes_boundary) - 1):
+ object_sizes_of_interest.append([
+ self.object_sizes_boundary[i], self.object_sizes_boundary[i + 1]
+ ])
+ self.object_sizes_of_interest = object_sizes_of_interest
+ self.norm_reg_targets = norm_reg_targets
+
+ def _compute_points(self, w, h):
+ """
+ compute the corresponding points in each feature map
+        :param w: image width
+        :param h: image height
+        :return: points from all feature maps
+ """
+ locations = []
+ for stride in self.downsample_ratios:
+ shift_x = np.arange(0, w, stride).astype(np.float32)
+ shift_y = np.arange(0, h, stride).astype(np.float32)
+ shift_x, shift_y = np.meshgrid(shift_x, shift_y)
+ shift_x = shift_x.flatten()
+ shift_y = shift_y.flatten()
+ location = np.stack([shift_x, shift_y], axis=1) + stride // 2
+ locations.append(location)
+ num_points_each_level = [len(location) for location in locations]
+ locations = np.concatenate(locations, axis=0)
+ return locations, num_points_each_level
+
+ def _convert_xywh2xyxy(self, gt_bbox, w, h):
+ """
+ convert the bounding box from style xywh to xyxy
+ :param gt_bbox: bounding boxes normalized into [0, 1]
+ :param w: image width
+ :param h: image height
+ :return: bounding boxes in xyxy style
+ """
+ bboxes = gt_bbox.copy()
+ bboxes[:, [0, 2]] = bboxes[:, [0, 2]] * w
+ bboxes[:, [1, 3]] = bboxes[:, [1, 3]] * h
+ bboxes[:, 2] = bboxes[:, 0] + bboxes[:, 2]
+ bboxes[:, 3] = bboxes[:, 1] + bboxes[:, 3]
+ return bboxes
+
+ def _check_inside_boxes_limited(self, gt_bbox, xs, ys,
+ num_points_each_level):
+ """
+        check whether points are within the clipped boxes
+        :param gt_bbox: bounding boxes
+        :param xs: horizontal coordinates of points
+        :param ys: vertical coordinates of points
+        :return: mask of whether each point is within a clipped gt box
+ """
+ bboxes = np.reshape(
+ gt_bbox, newshape=[1, gt_bbox.shape[0], gt_bbox.shape[1]])
+ bboxes = np.tile(bboxes, reps=[xs.shape[0], 1, 1])
+ ct_x = (bboxes[:, :, 0] + bboxes[:, :, 2]) / 2
+ ct_y = (bboxes[:, :, 1] + bboxes[:, :, 3]) / 2
+ beg = 0
+ clipped_box = bboxes.copy()
+ for lvl, stride in enumerate(self.downsample_ratios):
+ end = beg + num_points_each_level[lvl]
+ stride_exp = self.center_sampling_radius * stride
+ clipped_box[beg:end, :, 0] = np.maximum(
+ bboxes[beg:end, :, 0], ct_x[beg:end, :] - stride_exp)
+ clipped_box[beg:end, :, 1] = np.maximum(
+ bboxes[beg:end, :, 1], ct_y[beg:end, :] - stride_exp)
+ clipped_box[beg:end, :, 2] = np.minimum(
+ bboxes[beg:end, :, 2], ct_x[beg:end, :] + stride_exp)
+ clipped_box[beg:end, :, 3] = np.minimum(
+ bboxes[beg:end, :, 3], ct_y[beg:end, :] + stride_exp)
+ beg = end
+ l_res = xs - clipped_box[:, :, 0]
+ r_res = clipped_box[:, :, 2] - xs
+ t_res = ys - clipped_box[:, :, 1]
+ b_res = clipped_box[:, :, 3] - ys
+ clipped_box_reg_targets = np.stack([l_res, t_res, r_res, b_res], axis=2)
+ inside_gt_box = np.min(clipped_box_reg_targets, axis=2) > 0
+ return inside_gt_box
+
+ def __call__(self, samples, context=None):
+        assert len(self.object_sizes_of_interest) == len(self.downsample_ratios), \
+            "'object_sizes_of_interest' and 'downsample_ratios' should have the same length."
+
+ for sample in samples:
+ im = sample['image']
+ bboxes = sample['gt_bbox']
+ gt_class = sample['gt_class']
+ # calculate the locations
+ h, w = im.shape[1:3]
+ points, num_points_each_level = self._compute_points(w, h)
+ object_scale_exp = []
+ for i, num_pts in enumerate(num_points_each_level):
+ object_scale_exp.append(
+ np.tile(
+ np.array([self.object_sizes_of_interest[i]]),
+ reps=[num_pts, 1]))
+ object_scale_exp = np.concatenate(object_scale_exp, axis=0)
+
+ gt_area = (bboxes[:, 2] - bboxes[:, 0]) * (
+ bboxes[:, 3] - bboxes[:, 1])
+ xs, ys = points[:, 0], points[:, 1]
+ xs = np.reshape(xs, newshape=[xs.shape[0], 1])
+ xs = np.tile(xs, reps=[1, bboxes.shape[0]])
+ ys = np.reshape(ys, newshape=[ys.shape[0], 1])
+ ys = np.tile(ys, reps=[1, bboxes.shape[0]])
+
+ l_res = xs - bboxes[:, 0]
+ r_res = bboxes[:, 2] - xs
+ t_res = ys - bboxes[:, 1]
+ b_res = bboxes[:, 3] - ys
+ reg_targets = np.stack([l_res, t_res, r_res, b_res], axis=2)
+ if self.center_sampling_radius > 0:
+ is_inside_box = self._check_inside_boxes_limited(
+ bboxes, xs, ys, num_points_each_level)
+ else:
+ is_inside_box = np.min(reg_targets, axis=2) > 0
+ # check if the targets is inside the corresponding level
+ max_reg_targets = np.max(reg_targets, axis=2)
+ lower_bound = np.tile(
+ np.expand_dims(
+ object_scale_exp[:, 0], axis=1),
+ reps=[1, max_reg_targets.shape[1]])
+ high_bound = np.tile(
+ np.expand_dims(
+ object_scale_exp[:, 1], axis=1),
+ reps=[1, max_reg_targets.shape[1]])
+ is_match_current_level = \
+ (max_reg_targets > lower_bound) & \
+ (max_reg_targets < high_bound)
+ points2gtarea = np.tile(
+ np.expand_dims(
+ gt_area, axis=0), reps=[xs.shape[0], 1])
+ points2gtarea[is_inside_box == 0] = self.INF
+ points2gtarea[is_match_current_level == 0] = self.INF
+ points2min_area = points2gtarea.min(axis=1)
+ points2min_area_ind = points2gtarea.argmin(axis=1)
+ labels = gt_class[points2min_area_ind] + 1
+ labels[points2min_area == self.INF] = 0
+ reg_targets = reg_targets[range(xs.shape[0]), points2min_area_ind]
+ ctn_targets = np.sqrt((reg_targets[:, [0, 2]].min(axis=1) / \
+ reg_targets[:, [0, 2]].max(axis=1)) * \
+ (reg_targets[:, [1, 3]].min(axis=1) / \
+ reg_targets[:, [1, 3]].max(axis=1))).astype(np.float32)
+ ctn_targets = np.reshape(
+ ctn_targets, newshape=[ctn_targets.shape[0], 1])
+ ctn_targets[labels <= 0] = 0
+ pos_ind = np.nonzero(labels != 0)
+ reg_targets_pos = reg_targets[pos_ind[0], :]
+ split_sections = []
+ beg = 0
+ for lvl in range(len(num_points_each_level)):
+ end = beg + num_points_each_level[lvl]
+ split_sections.append(end)
+ beg = end
+ labels_by_level = np.split(labels, split_sections, axis=0)
+ reg_targets_by_level = np.split(reg_targets, split_sections, axis=0)
+ ctn_targets_by_level = np.split(ctn_targets, split_sections, axis=0)
+ for lvl in range(len(self.downsample_ratios)):
+ grid_w = int(np.ceil(w / self.downsample_ratios[lvl]))
+ grid_h = int(np.ceil(h / self.downsample_ratios[lvl]))
+ if self.norm_reg_targets:
+ sample['reg_target{}'.format(lvl)] = \
+ np.reshape(
+ reg_targets_by_level[lvl] / \
+ self.downsample_ratios[lvl],
+ newshape=[grid_h, grid_w, 4])
+ else:
+ sample['reg_target{}'.format(lvl)] = np.reshape(
+ reg_targets_by_level[lvl],
+ newshape=[grid_h, grid_w, 4])
+ sample['labels{}'.format(lvl)] = np.reshape(
+ labels_by_level[lvl], newshape=[grid_h, grid_w, 1])
+ sample['centerness{}'.format(lvl)] = np.reshape(
+ ctn_targets_by_level[lvl], newshape=[grid_h, grid_w, 1])
+
+ sample.pop('is_crowd', None)
+ sample.pop('difficult', None)
+ sample.pop('gt_class', None)
+ sample.pop('gt_bbox', None)
+ return samples
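+
+# Summary of the emitted keys (derived from the code above): per FPN level
+# lvl, 'reg_target{lvl}' is [grid_h, grid_w, 4] of (l, t, r, b) distances
+# (optionally normalized by the stride), 'labels{lvl}' is [grid_h, grid_w, 1]
+# with 0 as background, and 'centerness{lvl}' is the FCOS centerness
+# sqrt((min_lr / max_lr) * (min_tb / max_tb)).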
+
+
+@register_op
+class Gt2GFLTarget(BaseOperator):
+ """
+    Generate GFocal loss targets from ground truth data
+ """
+
+ def __init__(self,
+ num_classes=80,
+ downsample_ratios=[8, 16, 32, 64, 128],
+ grid_cell_scale=4,
+ cell_offset=0):
+ super(Gt2GFLTarget, self).__init__()
+ self.num_classes = num_classes
+ self.downsample_ratios = downsample_ratios
+ self.grid_cell_scale = grid_cell_scale
+ self.cell_offset = cell_offset
+
+ self.assigner = ATSSAssigner()
+
+ def get_grid_cells(self, featmap_size, scale, stride, offset=0):
+ """
+ Generate grid cells of a feature map for target assignment.
+ Args:
+ featmap_size: Size of a single level feature map.
+ scale: Grid cell scale.
+ stride: Down sample stride of the feature map.
+ offset: Offset of grid cells.
+ return:
+ Grid_cells xyxy position. Size should be [feat_w * feat_h, 4]
+ """
+ cell_size = stride * scale
+ h, w = featmap_size
+ x_range = (np.arange(w, dtype=np.float32) + offset) * stride
+ y_range = (np.arange(h, dtype=np.float32) + offset) * stride
+ x, y = np.meshgrid(x_range, y_range)
+ y = y.flatten()
+ x = x.flatten()
+ grid_cells = np.stack(
+ [
+ x - 0.5 * cell_size, y - 0.5 * cell_size, x + 0.5 * cell_size,
+ y + 0.5 * cell_size
+ ],
+ axis=-1)
+ return grid_cells
+
+ def get_sample(self, assign_gt_inds, gt_bboxes):
+ pos_inds = np.unique(np.nonzero(assign_gt_inds > 0)[0])
+ neg_inds = np.unique(np.nonzero(assign_gt_inds == 0)[0])
+ pos_assigned_gt_inds = assign_gt_inds[pos_inds] - 1
+
+ if gt_bboxes.size == 0:
+ # hack for index error case
+ assert pos_assigned_gt_inds.size == 0
+ pos_gt_bboxes = np.empty_like(gt_bboxes).reshape(-1, 4)
+ else:
+ if len(gt_bboxes.shape) < 2:
+                gt_bboxes = gt_bboxes.reshape(-1, 4)
+ pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds, :]
+ return pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds
+
+ def __call__(self, samples, context=None):
+ assert len(samples) > 0
+ batch_size = len(samples)
+ # get grid cells of image
+ h, w = samples[0]['image'].shape[1:3]
+ multi_level_grid_cells = []
+ for stride in self.downsample_ratios:
+ featmap_size = (int(math.ceil(h / stride)),
+ int(math.ceil(w / stride)))
+ multi_level_grid_cells.append(
+ self.get_grid_cells(featmap_size, self.grid_cell_scale, stride,
+ self.cell_offset))
+ mlvl_grid_cells_list = [
+ multi_level_grid_cells for i in range(batch_size)
+ ]
+ # pixel cell number of multi-level feature maps
+ num_level_cells = [
+ grid_cells.shape[0] for grid_cells in mlvl_grid_cells_list[0]
+ ]
+ num_level_cells_list = [num_level_cells] * batch_size
+ # concat all level cells and to a single array
+ for i in range(batch_size):
+ mlvl_grid_cells_list[i] = np.concatenate(mlvl_grid_cells_list[i])
+ # target assign on all images
+ for sample, grid_cells, num_level_cells in zip(
+ samples, mlvl_grid_cells_list, num_level_cells_list):
+ gt_bboxes = sample['gt_bbox']
+ gt_labels = sample['gt_class'].squeeze()
+ if gt_labels.size == 1:
+ gt_labels = np.array([gt_labels]).astype(np.int32)
+ gt_bboxes_ignore = None
+ assign_gt_inds, _ = self.assigner(grid_cells, num_level_cells,
+ gt_bboxes, gt_bboxes_ignore,
+ gt_labels)
+ pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds = self.get_sample(
+ assign_gt_inds, gt_bboxes)
+
+ num_cells = grid_cells.shape[0]
+ bbox_targets = np.zeros_like(grid_cells)
+ bbox_weights = np.zeros_like(grid_cells)
+ labels = np.ones([num_cells], dtype=np.int64) * self.num_classes
+ label_weights = np.zeros([num_cells], dtype=np.float32)
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ if not np.any(gt_labels):
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[pos_assigned_gt_inds]
+
+ label_weights[pos_inds] = 1.0
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+ sample['grid_cells'] = grid_cells
+ sample['labels'] = labels
+ sample['label_weights'] = label_weights
+ sample['bbox_targets'] = bbox_targets
+ sample['pos_num'] = max(pos_inds.size, 1)
+ sample.pop('is_crowd', None)
+ sample.pop('difficult', None)
+ sample.pop('gt_class', None)
+ sample.pop('gt_bbox', None)
+ sample.pop('gt_score', None)
+ return samples
+
+
+@register_op
+class Gt2TTFTarget(BaseOperator):
+ __shared__ = ['num_classes']
+ """
+ Gt2TTFTarget
+    Generate TTFNet targets from ground truth data
+
+ Args:
+ num_classes(int): the number of classes.
+ down_ratio(int): the down ratio from images to heatmap, 4 by default.
+ alpha(float): the alpha parameter to generate gaussian target.
+ 0.54 by default.
+ """
+
+ def __init__(self, num_classes=80, down_ratio=4, alpha=0.54):
+ super(Gt2TTFTarget, self).__init__()
+ self.down_ratio = down_ratio
+ self.num_classes = num_classes
+ self.alpha = alpha
+
+ def __call__(self, samples, context=None):
+ output_size = samples[0]['image'].shape[1]
+ feat_size = output_size // self.down_ratio
+ for sample in samples:
+ heatmap = np.zeros(
+ (self.num_classes, feat_size, feat_size), dtype='float32')
+ box_target = np.ones(
+ (4, feat_size, feat_size), dtype='float32') * -1
+ reg_weight = np.zeros((1, feat_size, feat_size), dtype='float32')
+
+ gt_bbox = sample['gt_bbox']
+ gt_class = sample['gt_class']
+
+ bbox_w = gt_bbox[:, 2] - gt_bbox[:, 0] + 1
+ bbox_h = gt_bbox[:, 3] - gt_bbox[:, 1] + 1
+ area = bbox_w * bbox_h
+ boxes_areas_log = np.log(area)
+ boxes_ind = np.argsort(boxes_areas_log, axis=0)[::-1]
+ boxes_area_topk_log = boxes_areas_log[boxes_ind]
+ gt_bbox = gt_bbox[boxes_ind]
+ gt_class = gt_class[boxes_ind]
+
+ feat_gt_bbox = gt_bbox / self.down_ratio
+ feat_gt_bbox = np.clip(feat_gt_bbox, 0, feat_size - 1)
+ feat_hs, feat_ws = (feat_gt_bbox[:, 3] - feat_gt_bbox[:, 1],
+ feat_gt_bbox[:, 2] - feat_gt_bbox[:, 0])
+
+ ct_inds = np.stack(
+ [(gt_bbox[:, 0] + gt_bbox[:, 2]) / 2,
+ (gt_bbox[:, 1] + gt_bbox[:, 3]) / 2],
+ axis=1) / self.down_ratio
+
+ h_radiuses_alpha = (feat_hs / 2. * self.alpha).astype('int32')
+ w_radiuses_alpha = (feat_ws / 2. * self.alpha).astype('int32')
+
+ for k in range(len(gt_bbox)):
+ cls_id = gt_class[k]
+ fake_heatmap = np.zeros((feat_size, feat_size), dtype='float32')
+ self.draw_truncate_gaussian(fake_heatmap, ct_inds[k],
+ h_radiuses_alpha[k],
+ w_radiuses_alpha[k])
+
+ heatmap[cls_id] = np.maximum(heatmap[cls_id], fake_heatmap)
+ box_target_inds = fake_heatmap > 0
+ box_target[:, box_target_inds] = gt_bbox[k][:, None]
+
+ local_heatmap = fake_heatmap[box_target_inds]
+ ct_div = np.sum(local_heatmap)
+ local_heatmap *= boxes_area_topk_log[k]
+ reg_weight[0, box_target_inds] = local_heatmap / ct_div
+ sample['ttf_heatmap'] = heatmap
+ sample['ttf_box_target'] = box_target
+ sample['ttf_reg_weight'] = reg_weight
+ sample.pop('is_crowd', None)
+ sample.pop('difficult', None)
+ sample.pop('gt_class', None)
+ sample.pop('gt_bbox', None)
+ sample.pop('gt_score', None)
+ return samples
+
+ def draw_truncate_gaussian(self, heatmap, center, h_radius, w_radius):
+ h, w = 2 * h_radius + 1, 2 * w_radius + 1
+ sigma_x = w / 6
+ sigma_y = h / 6
+ gaussian = gaussian2D((h, w), sigma_x, sigma_y)
+
+ x, y = int(center[0]), int(center[1])
+
+ height, width = heatmap.shape[0:2]
+
+ left, right = min(x, w_radius), min(width - x, w_radius + 1)
+ top, bottom = min(y, h_radius), min(height - y, h_radius + 1)
+
+ masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
+ masked_gaussian = gaussian[h_radius - top:h_radius + bottom, w_radius -
+ left:w_radius + right]
+ if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
+ heatmap[y - top:y + bottom, x - left:x + right] = np.maximum(
+ masked_heatmap, masked_gaussian)
+ return heatmap
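+
+# Illustrative check (assuming gaussian2D from op_helper peaks at 1.0 at its
+# center): drawing radii (2, 3) at the middle of an empty 9x9 heatmap writes
+# a 5x7 truncated gaussian window whose peak of 1.0 sits at (4, 4).
+#
+#   hm = np.zeros((9, 9), dtype='float32')
+#   Gt2TTFTarget().draw_truncate_gaussian(hm, center=(4, 4),
+#                                         h_radius=2, w_radius=3)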
+
+
+@register_op
+class Gt2Solov2Target(BaseOperator):
+ """Assign mask target and labels in SOLOv2 network.
+ The code of this function is based on:
+ https://github.com/WXinlong/SOLO/blob/master/mmdet/models/anchor_heads/solov2_head.py#L271
+ Args:
+ num_grids (list): The list of feature map grids size.
+ scale_ranges (list): The list of mask boundary range.
+ coord_sigma (float): The coefficient of coordinate area length.
+ sampling_ratio (float): The ratio of down sampling.
+ """
+
+ def __init__(self,
+ num_grids=[40, 36, 24, 16, 12],
+ scale_ranges=[[1, 96], [48, 192], [96, 384], [192, 768],
+ [384, 2048]],
+ coord_sigma=0.2,
+ sampling_ratio=4.0):
+ super(Gt2Solov2Target, self).__init__()
+ self.num_grids = num_grids
+ self.scale_ranges = scale_ranges
+ self.coord_sigma = coord_sigma
+ self.sampling_ratio = sampling_ratio
+
+ def _scale_size(self, im, scale):
+ resized_img = cv2.resize(
+ im, None, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
+ return resized_img
+
+ def __call__(self, samples, context=None):
+ sample_id = 0
+ max_ins_num = [0] * len(self.num_grids)
+ for sample in samples:
+ gt_bboxes_raw = sample['gt_bbox']
+ gt_labels_raw = sample['gt_class'] + 1
+ im_c, im_h, im_w = sample['image'].shape[:]
+ gt_masks_raw = sample['gt_segm'].astype(np.uint8)
+ mask_feat_size = [
+ int(im_h / self.sampling_ratio), int(im_w / self.sampling_ratio)
+ ]
+ gt_areas = np.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) *
+ (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1]))
+ ins_ind_label_list = []
+ idx = 0
+ for (lower_bound, upper_bound), num_grid \
+ in zip(self.scale_ranges, self.num_grids):
+
+ hit_indices = ((gt_areas >= lower_bound) &
+ (gt_areas <= upper_bound)).nonzero()[0]
+ num_ins = len(hit_indices)
+
+ ins_label = []
+ grid_order = []
+ cate_label = np.zeros([num_grid, num_grid], dtype=np.int64)
+                ins_ind_label = np.zeros([num_grid**2], dtype=bool)
+
+ if num_ins == 0:
+ ins_label = np.zeros(
+ [1, mask_feat_size[0], mask_feat_size[1]],
+ dtype=np.uint8)
+ ins_ind_label_list.append(ins_ind_label)
+ sample['cate_label{}'.format(idx)] = cate_label.flatten()
+ sample['ins_label{}'.format(idx)] = ins_label
+ sample['grid_order{}'.format(idx)] = np.asarray(
+ [sample_id * num_grid * num_grid + 0], dtype=np.int32)
+ idx += 1
+ continue
+ gt_bboxes = gt_bboxes_raw[hit_indices]
+ gt_labels = gt_labels_raw[hit_indices]
+ gt_masks = gt_masks_raw[hit_indices, ...]
+
+ half_ws = 0.5 * (
+ gt_bboxes[:, 2] - gt_bboxes[:, 0]) * self.coord_sigma
+ half_hs = 0.5 * (
+ gt_bboxes[:, 3] - gt_bboxes[:, 1]) * self.coord_sigma
+
+ for seg_mask, gt_label, half_h, half_w in zip(
+ gt_masks, gt_labels, half_hs, half_ws):
+ if seg_mask.sum() == 0:
+ continue
+ # mass center
+ upsampled_size = (mask_feat_size[0] * 4,
+ mask_feat_size[1] * 4)
+ center_h, center_w = ndimage.measurements.center_of_mass(
+ seg_mask)
+ coord_w = int(
+ (center_w / upsampled_size[1]) // (1. / num_grid))
+ coord_h = int(
+ (center_h / upsampled_size[0]) // (1. / num_grid))
+
+ # left, top, right, down
+ top_box = max(0,
+ int(((center_h - half_h) / upsampled_size[0])
+ // (1. / num_grid)))
+ down_box = min(num_grid - 1,
+ int(((center_h + half_h) / upsampled_size[0])
+ // (1. / num_grid)))
+ left_box = max(0,
+ int(((center_w - half_w) / upsampled_size[1])
+ // (1. / num_grid)))
+ right_box = min(num_grid - 1,
+ int(((center_w + half_w) /
+ upsampled_size[1]) // (1. / num_grid)))
+
+ top = max(top_box, coord_h - 1)
+ down = min(down_box, coord_h + 1)
+ left = max(coord_w - 1, left_box)
+ right = min(right_box, coord_w + 1)
+
+ cate_label[top:(down + 1), left:(right + 1)] = gt_label
+ seg_mask = self._scale_size(
+ seg_mask, scale=1. / self.sampling_ratio)
+ for i in range(top, down + 1):
+ for j in range(left, right + 1):
+ label = int(i * num_grid + j)
+ cur_ins_label = np.zeros(
+ [mask_feat_size[0], mask_feat_size[1]],
+ dtype=np.uint8)
+ cur_ins_label[:seg_mask.shape[0], :seg_mask.shape[
+ 1]] = seg_mask
+ ins_label.append(cur_ins_label)
+ ins_ind_label[label] = True
+ grid_order.append(sample_id * num_grid * num_grid +
+ label)
+                if not ins_label:
+ ins_label = np.zeros(
+ [1, mask_feat_size[0], mask_feat_size[1]],
+ dtype=np.uint8)
+ ins_ind_label_list.append(ins_ind_label)
+ sample['cate_label{}'.format(idx)] = cate_label.flatten()
+ sample['ins_label{}'.format(idx)] = ins_label
+ sample['grid_order{}'.format(idx)] = np.asarray(
+ [sample_id * num_grid * num_grid + 0], dtype=np.int32)
+ else:
+ ins_label = np.stack(ins_label, axis=0)
+ ins_ind_label_list.append(ins_ind_label)
+ sample['cate_label{}'.format(idx)] = cate_label.flatten()
+ sample['ins_label{}'.format(idx)] = ins_label
+ sample['grid_order{}'.format(idx)] = np.asarray(
+ grid_order, dtype=np.int32)
+ assert len(grid_order) > 0
+ max_ins_num[idx] = max(
+ max_ins_num[idx],
+ sample['ins_label{}'.format(idx)].shape[0])
+ idx += 1
+ ins_ind_labels = np.concatenate([
+ ins_ind_labels_level_img
+ for ins_ind_labels_level_img in ins_ind_label_list
+ ])
+ fg_num = np.sum(ins_ind_labels)
+ sample['fg_num'] = fg_num
+ sample_id += 1
+
+ sample.pop('is_crowd')
+ sample.pop('gt_class')
+ sample.pop('gt_bbox')
+ sample.pop('gt_poly')
+ sample.pop('gt_segm')
+
+ # padding batch
+ for data in samples:
+ for idx in range(len(self.num_grids)):
+ gt_ins_data = np.zeros(
+ [
+ max_ins_num[idx],
+ data['ins_label{}'.format(idx)].shape[1],
+ data['ins_label{}'.format(idx)].shape[2]
+ ],
+ dtype=np.uint8)
+ gt_ins_data[0:data['ins_label{}'.format(idx)].shape[
+ 0], :, :] = data['ins_label{}'.format(idx)]
+ gt_grid_order = np.zeros([max_ins_num[idx]], dtype=np.int32)
+ gt_grid_order[0:data['grid_order{}'.format(idx)].shape[
+ 0]] = data['grid_order{}'.format(idx)]
+ data['ins_label{}'.format(idx)] = gt_ins_data
+ data['grid_order{}'.format(idx)] = gt_grid_order
+
+ return samples
+
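+# Illustrative sketch of the grid assignment above (assumed values, not part
+# of the op): int((center / upsampled_size) // (1. / num_grid)) is the grid
+# cell of the mass center. For num_grid = 40, upsampled_size = (800, 800) and
+# a mass center at (center_h, center_w) = (420.0, 100.0):
+#
+#   coord_h = int((420.0 / 800) // (1. / 40))  # -> 21
+#   coord_w = int((100.0 / 800) // (1. / 40))  # -> 5
+#
+# so the instance is written to cell (21, 5) and its clipped +-1 neighborhood,
+# further bounded by the coord_sigma-scaled half-width/half-height box.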
+
+@register_op
+class Gt2SparseRCNNTarget(BaseOperator):
+    '''
+    Generate SparseRCNN targets from ground-truth data.
+    '''
+
+ def __init__(self):
+ super(Gt2SparseRCNNTarget, self).__init__()
+
+ def __call__(self, samples, context=None):
+ for sample in samples:
+ im = sample["image"]
+ h, w = im.shape[1:3]
+ img_whwh = np.array([w, h, w, h], dtype=np.int32)
+ sample["img_whwh"] = img_whwh
+ if "scale_factor" in sample:
+ sample["scale_factor_wh"] = np.array(
+ [sample["scale_factor"][1], sample["scale_factor"][0]],
+ dtype=np.float32)
+ else:
+ sample["scale_factor_wh"] = np.array(
+ [1.0, 1.0], dtype=np.float32)
+
+ return samples
+
+
+@register_op
+class PadMaskBatch(BaseOperator):
+ """
+ Pad a batch of samples so they can be divisible by a stride.
+ The layout of each image should be 'CHW'.
+ Args:
+ pad_to_stride (int): If `pad_to_stride > 0`, pad zeros to ensure
+ height and width is divisible by `pad_to_stride`.
+ return_pad_mask (bool): If `return_pad_mask = True`, return
+ `pad_mask` for transformer.
+ """
+
+ def __init__(self, pad_to_stride=0, return_pad_mask=False):
+ super(PadMaskBatch, self).__init__()
+ self.pad_to_stride = pad_to_stride
+ self.return_pad_mask = return_pad_mask
+
+ def __call__(self, samples, context=None):
+ """
+ Args:
+ samples (list): a batch of sample, each is dict.
+ """
+ coarsest_stride = self.pad_to_stride
+
+ max_shape = np.array([data['image'].shape for data in samples]).max(
+ axis=0)
+ if coarsest_stride > 0:
+ max_shape[1] = int(
+ np.ceil(max_shape[1] / coarsest_stride) * coarsest_stride)
+ max_shape[2] = int(
+ np.ceil(max_shape[2] / coarsest_stride) * coarsest_stride)
+
+ for data in samples:
+ im = data['image']
+ im_c, im_h, im_w = im.shape[:]
+ padding_im = np.zeros(
+ (im_c, max_shape[1], max_shape[2]), dtype=np.float32)
+ padding_im[:, :im_h, :im_w] = im
+ data['image'] = padding_im
+ if 'semantic' in data and data['semantic'] is not None:
+ semantic = data['semantic']
+ padding_sem = np.zeros(
+ (1, max_shape[1], max_shape[2]), dtype=np.float32)
+ padding_sem[:, :im_h, :im_w] = semantic
+ data['semantic'] = padding_sem
+ if 'gt_segm' in data and data['gt_segm'] is not None:
+ gt_segm = data['gt_segm']
+ padding_segm = np.zeros(
+ (gt_segm.shape[0], max_shape[1], max_shape[2]),
+ dtype=np.uint8)
+ padding_segm[:, :im_h, :im_w] = gt_segm
+ data['gt_segm'] = padding_segm
+ if self.return_pad_mask:
+ padding_mask = np.zeros(
+ (max_shape[1], max_shape[2]), dtype=np.float32)
+ padding_mask[:im_h, :im_w] = 1.
+ data['pad_mask'] = padding_mask
+
+ if 'gt_rbox2poly' in data and data['gt_rbox2poly'] is not None:
+                # poly to rbox
+ polys = data['gt_rbox2poly']
+ rbox = bbox_utils.poly2rbox(polys)
+ data['gt_rbox'] = rbox
+
+ return samples
+
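+# Illustrative usage sketch (assumed shapes): with pad_to_stride = 32 and a
+# batch whose largest image is (3, 427, 640), the padded canvas becomes
+# (3, ceil(427/32)*32, ceil(640/32)*32) = (3, 448, 640). Each image is
+# zero-padded into the top-left corner, and with return_pad_mask = True every
+# sample also gets a 'pad_mask' marking the valid region with ones:
+#
+#   batch_op = PadMaskBatch(pad_to_stride=32, return_pad_mask=True)
+#   samples = batch_op(samples)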
+
+@register_op
+class Gt2CenterNetTarget(BaseOperator):
+ """Gt2CenterNetTarget
+ Genterate CenterNet targets by ground-truth
+ Args:
+ down_ratio (int): The down sample ratio between output feature and
+ input image.
+ num_classes (int): The number of classes, 80 by default.
+ max_objs (int): The maximum objects detected, 128 by default.
+ """
+
+ def __init__(self, down_ratio, num_classes=80, max_objs=128):
+ super(Gt2CenterNetTarget, self).__init__()
+ self.down_ratio = down_ratio
+ self.num_classes = num_classes
+ self.max_objs = max_objs
+
+ def __call__(self, sample, context=None):
+ input_h, input_w = sample['image'].shape[1:]
+ output_h = input_h // self.down_ratio
+ output_w = input_w // self.down_ratio
+ num_classes = self.num_classes
+ c = sample['center']
+ s = sample['scale']
+ gt_bbox = sample['gt_bbox']
+ gt_class = sample['gt_class']
+
+ hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
+ wh = np.zeros((self.max_objs, 2), dtype=np.float32)
+ dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)
+ reg = np.zeros((self.max_objs, 2), dtype=np.float32)
+ ind = np.zeros((self.max_objs), dtype=np.int64)
+ reg_mask = np.zeros((self.max_objs), dtype=np.int32)
+ cat_spec_wh = np.zeros(
+ (self.max_objs, num_classes * 2), dtype=np.float32)
+ cat_spec_mask = np.zeros(
+ (self.max_objs, num_classes * 2), dtype=np.int32)
+
+ trans_output = get_affine_transform(c, [s, s], 0, [output_w, output_h])
+
+ gt_det = []
+ for i, (bbox, cls) in enumerate(zip(gt_bbox, gt_class)):
+ cls = int(cls)
+ bbox[:2] = affine_transform(bbox[:2], trans_output)
+ bbox[2:] = affine_transform(bbox[2:], trans_output)
+ bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
+ bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
+ h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
+ if h > 0 and w > 0:
+ radius = gaussian_radius((math.ceil(h), math.ceil(w)), 0.7)
+ radius = max(0, int(radius))
+ ct = np.array(
+ [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2],
+ dtype=np.float32)
+ ct_int = ct.astype(np.int32)
+ draw_umich_gaussian(hm[cls], ct_int, radius)
+ wh[i] = 1. * w, 1. * h
+ ind[i] = ct_int[1] * output_w + ct_int[0]
+ reg[i] = ct - ct_int
+ reg_mask[i] = 1
+ cat_spec_wh[i, cls * 2:cls * 2 + 2] = wh[i]
+ cat_spec_mask[i, cls * 2:cls * 2 + 2] = 1
+ gt_det.append([
+ ct[0] - w / 2, ct[1] - h / 2, ct[0] + w / 2, ct[1] + h / 2,
+ 1, cls
+ ])
+
+ sample.pop('gt_bbox', None)
+ sample.pop('gt_class', None)
+ sample.pop('center', None)
+ sample.pop('scale', None)
+ sample.pop('is_crowd', None)
+ sample.pop('difficult', None)
+ sample['heatmap'] = hm
+ sample['index_mask'] = reg_mask
+ sample['index'] = ind
+ sample['size'] = wh
+ sample['offset'] = reg
+ return sample
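+# Illustrative sketch (assumed values): the flattened index gathered by the
+# CenterNet loss is ind[i] = ct_int[1] * output_w + ct_int[0]. With
+# down_ratio = 4 a 512x512 input yields output_w = output_h = 128, so a center
+# at feature-map coords (x, y) = (30, 45) maps to index 45 * 128 + 30 = 5790,
+# while 'offset' keeps the sub-pixel remainder ct - ct_int lost by the cast.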
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/gridmask_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/gridmask_utils.py
new file mode 100644
index 000000000..c18701556
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/gridmask_utils.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The code is based on:
+# https://github.com/dvlab-research/GridMask/blob/master/detection_grid/maskrcnn_benchmark/data/transforms/grid.py
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+import numpy as np
+from PIL import Image
+
+
+class Gridmask(object):
+ def __init__(self,
+ use_h=True,
+ use_w=True,
+ rotate=1,
+ offset=False,
+ ratio=0.5,
+ mode=1,
+ prob=0.7,
+ upper_iter=360000):
+ super(Gridmask, self).__init__()
+ self.use_h = use_h
+ self.use_w = use_w
+ self.rotate = rotate
+ self.offset = offset
+ self.ratio = ratio
+ self.mode = mode
+ self.prob = prob
+ self.st_prob = prob
+ self.upper_iter = upper_iter
+
+ def __call__(self, x, curr_iter):
+ self.prob = self.st_prob * min(1, 1.0 * curr_iter / self.upper_iter)
+ if np.random.rand() > self.prob:
+ return x
+ h, w, _ = x.shape
+ hh = int(1.5 * h)
+ ww = int(1.5 * w)
+ d = np.random.randint(2, h)
+ self.l = min(max(int(d * self.ratio + 0.5), 1), d - 1)
+ mask = np.ones((hh, ww), np.float32)
+ st_h = np.random.randint(d)
+ st_w = np.random.randint(d)
+ if self.use_h:
+ for i in range(hh // d):
+ s = d * i + st_h
+ t = min(s + self.l, hh)
+ mask[s:t, :] *= 0
+ if self.use_w:
+ for i in range(ww // d):
+ s = d * i + st_w
+ t = min(s + self.l, ww)
+ mask[:, s:t] *= 0
+
+ r = np.random.randint(self.rotate)
+ mask = Image.fromarray(np.uint8(mask))
+ mask = mask.rotate(r)
+ mask = np.asarray(mask)
+ mask = mask[(hh - h) // 2:(hh - h) // 2 + h, (ww - w) // 2:(ww - w) // 2
+ + w].astype(np.float32)
+
+ if self.mode == 1:
+ mask = 1 - mask
+ mask = np.expand_dims(mask, axis=-1)
+ if self.offset:
+ offset = (2 * (np.random.rand(h, w) - 0.5)).astype(np.float32)
+ x = (x * mask + offset * (1 - mask)).astype(x.dtype)
+ else:
+ x = (x * mask).astype(x.dtype)
+
+ return x
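+# Minimal usage sketch (hypothetical values): the mask is built on a 1.5x
+# canvas, rotated, then center-cropped back to (h, w); `ratio` sets the
+# dropped stripe length within each period `d` (inverted when mode == 1), and
+# the apply probability warms up linearly to `prob` over `upper_iter` steps:
+#
+#   gridmask = Gridmask(ratio=0.5, prob=0.7, upper_iter=360000)
+#   out = gridmask(img, curr_iter=180000)  # effective prob = 0.35 here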
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/keypoint_operators.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/keypoint_operators.py
new file mode 100644
index 000000000..81770b63e
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/keypoint_operators.py
@@ -0,0 +1,859 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# function:
+# operators to process sample,
+# eg: decode/resize/crop image
+
+from __future__ import absolute_import
+
+try:
+ from collections.abc import Sequence
+except Exception:
+ from collections import Sequence
+
+import cv2
+import numpy as np
+import math
+import copy
+
+from ...modeling.keypoint_utils import get_affine_mat_kernel, warp_affine_joints, get_affine_transform, affine_transform, get_warp_matrix
+from ppdet.core.workspace import serializable
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+registered_ops = []
+
+__all__ = [
+ 'RandomAffine',
+ 'KeyPointFlip',
+ 'TagGenerate',
+ 'ToHeatmaps',
+ 'NormalizePermute',
+ 'EvalAffine',
+ 'RandomFlipHalfBodyTransform',
+ 'TopDownAffine',
+ 'ToHeatmapsTopDown',
+ 'ToHeatmapsTopDown_DARK',
+ 'ToHeatmapsTopDown_UDP',
+ 'TopDownEvalAffine',
+ 'AugmentationbyInformantionDropping',
+]
+
+
+def register_keypointop(cls):
+ return serializable(cls)
+
+
+@register_keypointop
+class KeyPointFlip(object):
+ """Get the fliped image by flip_prob. flip the coords also
+ the left coords and right coords should exchange while flip, for the right keypoint will be left keypoint after image fliped
+
+ Args:
+ flip_permutation (list[17]): the left-right exchange order list corresponding to [0,1,2,...,16]
+ hmsize (list[2]): output heatmap's shape list of different scale outputs of higherhrnet
+ flip_prob (float): the ratio whether to flip the image
+ records(dict): the dict contained the image, mask and coords
+
+ Returns:
+ records(dict): contain the image, mask and coords after tranformed
+
+ """
+
+ def __init__(self, flip_permutation, hmsize, flip_prob=0.5):
+ super(KeyPointFlip, self).__init__()
+ assert isinstance(flip_permutation, Sequence)
+ self.flip_permutation = flip_permutation
+ self.flip_prob = flip_prob
+ self.hmsize = hmsize
+
+ def __call__(self, records):
+ image = records['image']
+ kpts_lst = records['joints']
+ mask_lst = records['mask']
+ flip = np.random.random() < self.flip_prob
+ if flip:
+ image = image[:, ::-1]
+ for idx, hmsize in enumerate(self.hmsize):
+ if len(mask_lst) > idx:
+ mask_lst[idx] = mask_lst[idx][:, ::-1]
+ if kpts_lst[idx].ndim == 3:
+ kpts_lst[idx] = kpts_lst[idx][:, self.flip_permutation]
+ else:
+ kpts_lst[idx] = kpts_lst[idx][self.flip_permutation]
+ kpts_lst[idx][..., 0] = hmsize - kpts_lst[idx][..., 0]
+ kpts_lst[idx] = kpts_lst[idx].astype(np.int64)
+ kpts_lst[idx][kpts_lst[idx][..., 0] >= hmsize, 2] = 0
+ kpts_lst[idx][kpts_lst[idx][..., 1] >= hmsize, 2] = 0
+ kpts_lst[idx][kpts_lst[idx][..., 0] < 0, 2] = 0
+ kpts_lst[idx][kpts_lst[idx][..., 1] < 0, 2] = 0
+ records['image'] = image
+ records['joints'] = kpts_lst
+ records['mask'] = mask_lst
+ return records
+
+
+@register_keypointop
+class RandomAffine(object):
+ """apply affine transform to image, mask and coords
+ to achieve the rotate, scale and shift effect for training image
+
+ Args:
+ max_degree (float): the max abslute rotate degree to apply, transform range is [-max_degree, max_degree]
+ max_scale (list[2]): the scale range to apply, transform range is [min, max]
+ max_shift (float): the max abslute shift ratio to apply, transform range is [-max_shift*imagesize, max_shift*imagesize]
+ hmsize (list[2]): output heatmap's shape list of different scale outputs of higherhrnet
+ trainsize (int): the standard length used to train, the 'scale_type' of [h,w] will be resize to trainsize for standard
+ scale_type (str): the length of [h,w] to used for trainsize, chosed between 'short' and 'long'
+ records(dict): the dict contained the image, mask and coords
+
+ Returns:
+ records(dict): contain the image, mask and coords after tranformed
+
+ """
+
+ def __init__(self,
+ max_degree=30,
+ scale=[0.75, 1.5],
+ max_shift=0.2,
+ hmsize=[128, 256],
+ trainsize=512,
+ scale_type='short'):
+ super(RandomAffine, self).__init__()
+ self.max_degree = max_degree
+ self.min_scale = scale[0]
+ self.max_scale = scale[1]
+ self.max_shift = max_shift
+ self.hmsize = hmsize
+ self.trainsize = trainsize
+ self.scale_type = scale_type
+
+ def _get_affine_matrix(self, center, scale, res, rot=0):
+ """Generate transformation matrix."""
+ h = scale
+ t = np.zeros((3, 3), dtype=np.float32)
+ t[0, 0] = float(res[1]) / h
+ t[1, 1] = float(res[0]) / h
+ t[0, 2] = res[1] * (-float(center[0]) / h + .5)
+ t[1, 2] = res[0] * (-float(center[1]) / h + .5)
+ t[2, 2] = 1
+ if rot != 0:
+ rot = -rot # To match direction of rotation from cropping
+ rot_mat = np.zeros((3, 3), dtype=np.float32)
+ rot_rad = rot * np.pi / 180
+ sn, cs = np.sin(rot_rad), np.cos(rot_rad)
+ rot_mat[0, :2] = [cs, -sn]
+ rot_mat[1, :2] = [sn, cs]
+ rot_mat[2, 2] = 1
+ # Need to rotate around center
+ t_mat = np.eye(3)
+ t_mat[0, 2] = -res[1] / 2
+ t_mat[1, 2] = -res[0] / 2
+ t_inv = t_mat.copy()
+ t_inv[:2, 2] *= -1
+ t = np.dot(t_inv, np.dot(rot_mat, np.dot(t_mat, t)))
+ return t
+
+ def __call__(self, records):
+ image = records['image']
+ keypoints = records['joints']
+ heatmap_mask = records['mask']
+
+ degree = (np.random.random() * 2 - 1) * self.max_degree
+ shape = np.array(image.shape[:2][::-1])
+        center = shape / 2
+
+ aug_scale = np.random.random() * (self.max_scale - self.min_scale
+ ) + self.min_scale
+ if self.scale_type == 'long':
+ scale = max(shape[0], shape[1]) / 1.0
+ elif self.scale_type == 'short':
+ scale = min(shape[0], shape[1]) / 1.0
+ else:
+ raise ValueError('Unknown scale type: {}'.format(self.scale_type))
+ roi_size = aug_scale * scale
+ dx = int(0)
+ dy = int(0)
+ if self.max_shift > 0:
+
+ dx = np.random.randint(-self.max_shift * roi_size,
+ self.max_shift * roi_size)
+ dy = np.random.randint(-self.max_shift * roi_size,
+ self.max_shift * roi_size)
+
+ center += np.array([dx, dy])
+ input_size = 2 * center
+
+ keypoints[..., :2] *= shape
+ heatmap_mask *= 255
+ kpts_lst = []
+ mask_lst = []
+
+ image_affine_mat = self._get_affine_matrix(
+ center, roi_size, (self.trainsize, self.trainsize), degree)[:2]
+ image = cv2.warpAffine(
+ image,
+ image_affine_mat, (self.trainsize, self.trainsize),
+ flags=cv2.INTER_LINEAR)
+ for hmsize in self.hmsize:
+ kpts = copy.deepcopy(keypoints)
+ mask_affine_mat = self._get_affine_matrix(
+ center, roi_size, (hmsize, hmsize), degree)[:2]
+ if heatmap_mask is not None:
+ mask = cv2.warpAffine(heatmap_mask, mask_affine_mat,
+ (hmsize, hmsize))
+ mask = ((mask / 255) > 0.5).astype(np.float32)
+ kpts[..., 0:2] = warp_affine_joints(kpts[..., 0:2].copy(),
+ mask_affine_mat)
+ kpts[np.trunc(kpts[..., 0]) >= hmsize, 2] = 0
+ kpts[np.trunc(kpts[..., 1]) >= hmsize, 2] = 0
+ kpts[np.trunc(kpts[..., 0]) < 0, 2] = 0
+ kpts[np.trunc(kpts[..., 1]) < 0, 2] = 0
+ kpts_lst.append(kpts)
+ mask_lst.append(mask)
+ records['image'] = image
+ records['joints'] = kpts_lst
+ records['mask'] = mask_lst
+ return records
+
+
+@register_keypointop
+class EvalAffine(object):
+ """apply affine transform to image
+ resize the short of [h,w] to standard size for eval
+
+ Args:
+ size (int): the standard length used to train, the 'short' of [h,w] will be resize to trainsize for standard
+ records(dict): the dict contained the image, mask and coords
+
+ Returns:
+ records(dict): contain the image, mask and coords after tranformed
+
+ """
+
+ def __init__(self, size, stride=64):
+ super(EvalAffine, self).__init__()
+ self.size = size
+ self.stride = stride
+
+ def __call__(self, records):
+ image = records['image']
+ mask = records['mask'] if 'mask' in records else None
+ s = self.size
+ h, w, _ = image.shape
+ trans, size_resized = get_affine_mat_kernel(h, w, s, inv=False)
+ image_resized = cv2.warpAffine(image, trans, size_resized)
+ if mask is not None:
+ mask = cv2.warpAffine(mask, trans, size_resized)
+ records['mask'] = mask
+ if 'joints' in records:
+ del records['joints']
+ records['image'] = image_resized
+ return records
+
+
+@register_keypointop
+class NormalizePermute(object):
+ def __init__(self,
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.120, 57.375],
+ is_scale=True):
+ super(NormalizePermute, self).__init__()
+ self.mean = mean
+ self.std = std
+ self.is_scale = is_scale
+
+ def __call__(self, records):
+ image = records['image']
+ image = image.astype(np.float32)
+ if self.is_scale:
+ image /= 255.
+ image = image.transpose((2, 0, 1))
+ mean = np.array(self.mean, dtype=np.float32)
+ std = np.array(self.std, dtype=np.float32)
+ invstd = 1. / std
+ for v, m, s in zip(image, mean, invstd):
+ v.__isub__(m).__imul__(s)
+ records['image'] = image
+ return records
+
+
+@register_keypointop
+class TagGenerate(object):
+ """record gt coords for aeloss to sample coords value in tagmaps
+
+ Args:
+ num_joints (int): the keypoint numbers of dataset to train
+ num_people (int): maxmum people to support for sample aeloss
+ records(dict): the dict contained the image, mask and coords
+
+ Returns:
+ records(dict): contain the gt coords used in tagmap
+
+ """
+
+ def __init__(self, num_joints, max_people=30):
+ super(TagGenerate, self).__init__()
+ self.max_people = max_people
+ self.num_joints = num_joints
+
+ def __call__(self, records):
+ kpts_lst = records['joints']
+ kpts = kpts_lst[0]
+ tagmap = np.zeros((self.max_people, self.num_joints, 4), dtype=np.int64)
+ inds = np.where(kpts[..., 2] > 0)
+ p, j = inds[0], inds[1]
+ visible = kpts[inds]
+        # tagmap is [max_people, num_joints, 4]; the last dim stores (j, y, x, valid flag)
+ tagmap[p, j, 0] = j
+ tagmap[p, j, 1] = visible[..., 1] # y
+ tagmap[p, j, 2] = visible[..., 0] # x
+ tagmap[p, j, 3] = 1
+ records['tagmap'] = tagmap
+ del records['joints']
+ return records
+
+
+@register_keypointop
+class ToHeatmaps(object):
+ """to generate the gaussin heatmaps of keypoint for heatmap loss
+
+ Args:
+ num_joints (int): the keypoint numbers of dataset to train
+ hmsize (list[2]): output heatmap's shape list of different scale outputs of higherhrnet
+ sigma (float): the std of gaussin kernel genereted
+ records(dict): the dict contained the image, mask and coords
+
+ Returns:
+ records(dict): contain the heatmaps used to heatmaploss
+
+ """
+
+ def __init__(self, num_joints, hmsize, sigma=None):
+ super(ToHeatmaps, self).__init__()
+ self.num_joints = num_joints
+ self.hmsize = np.array(hmsize)
+ if sigma is None:
+ sigma = hmsize[0] // 64
+ self.sigma = sigma
+
+ r = 6 * sigma + 3
+ x = np.arange(0, r, 1, np.float32)
+ y = x[:, None]
+ x0, y0 = 3 * sigma + 1, 3 * sigma + 1
+ self.gaussian = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))
+
+ def __call__(self, records):
+ kpts_lst = records['joints']
+ mask_lst = records['mask']
+ for idx, hmsize in enumerate(self.hmsize):
+ mask = mask_lst[idx]
+ kpts = kpts_lst[idx]
+ heatmaps = np.zeros((self.num_joints, hmsize, hmsize))
+ inds = np.where(kpts[..., 2] > 0)
+ visible = kpts[inds].astype(np.int64)[..., :2]
+ ul = np.round(visible - 3 * self.sigma - 1)
+ br = np.round(visible + 3 * self.sigma + 2)
+ sul = np.maximum(0, -ul)
+ sbr = np.minimum(hmsize, br) - ul
+ dul = np.clip(ul, 0, hmsize - 1)
+ dbr = np.clip(br, 0, hmsize)
+ for i in range(len(visible)):
+ if visible[i][0] < 0 or visible[i][1] < 0 or visible[i][
+ 0] >= hmsize or visible[i][1] >= hmsize:
+ continue
+ dx1, dy1 = dul[i]
+ dx2, dy2 = dbr[i]
+ sx1, sy1 = sul[i]
+ sx2, sy2 = sbr[i]
+ heatmaps[inds[1][i], dy1:dy2, dx1:dx2] = np.maximum(
+ self.gaussian[sy1:sy2, sx1:sx2],
+ heatmaps[inds[1][i], dy1:dy2, dx1:dx2])
+ records['heatmap_gt{}x'.format(idx + 1)] = heatmaps
+ records['mask_{}x'.format(idx + 1)] = mask
+ del records['mask']
+ return records
+
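+# Illustrative sketch (assumed values): each visible keypoint stamps the
+# precomputed (6*sigma+3)-wide gaussian window onto its heatmap, clipped at
+# the borders. With sigma = 2 the window is 15x15; a keypoint at (3, 3) on a
+# 128x128 heatmap gives ul = (-4, -4) and br = (11, 11), so only the 11x11
+# in-bounds part (source slice [4:15, 4:15]) is merged in with np.maximum.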
+
+@register_keypointop
+class RandomFlipHalfBodyTransform(object):
+ """apply data augment to image and coords
+ to achieve the flip, scale, rotate and half body transform effect for training image
+
+ Args:
+ trainsize (list):[w, h], Image target size
+ upper_body_ids (list): The upper body joint ids
+ flip_pairs (list): The left-right joints exchange order list
+ pixel_std (int): The pixel std of the scale
+ scale (float): The scale factor to transform the image
+ rot (int): The rotate factor to transform the image
+ num_joints_half_body (int): The joints threshold of the half body transform
+ prob_half_body (float): The threshold of the half body transform
+ flip (bool): Whether to flip the image
+
+ Returns:
+        records (dict): the image and coords after the transform
+
+ """
+
+ def __init__(self,
+ trainsize,
+ upper_body_ids,
+ flip_pairs,
+ pixel_std,
+ scale=0.35,
+ rot=40,
+ num_joints_half_body=8,
+ prob_half_body=0.3,
+ flip=True,
+ rot_prob=0.6):
+ super(RandomFlipHalfBodyTransform, self).__init__()
+ self.trainsize = trainsize
+ self.upper_body_ids = upper_body_ids
+ self.flip_pairs = flip_pairs
+ self.pixel_std = pixel_std
+ self.scale = scale
+ self.rot = rot
+ self.num_joints_half_body = num_joints_half_body
+ self.prob_half_body = prob_half_body
+ self.flip = flip
+ self.aspect_ratio = trainsize[0] * 1.0 / trainsize[1]
+ self.rot_prob = rot_prob
+
+ def halfbody_transform(self, joints, joints_vis):
+ upper_joints = []
+ lower_joints = []
+ for joint_id in range(joints.shape[0]):
+ if joints_vis[joint_id][0] > 0:
+ if joint_id in self.upper_body_ids:
+ upper_joints.append(joints[joint_id])
+ else:
+ lower_joints.append(joints[joint_id])
+ if np.random.randn() < 0.5 and len(upper_joints) > 2:
+ selected_joints = upper_joints
+ else:
+ selected_joints = lower_joints if len(
+ lower_joints) > 2 else upper_joints
+ if len(selected_joints) < 2:
+ return None, None
+ selected_joints = np.array(selected_joints, dtype=np.float32)
+ center = selected_joints.mean(axis=0)[:2]
+ left_top = np.amin(selected_joints, axis=0)
+ right_bottom = np.amax(selected_joints, axis=0)
+ w = right_bottom[0] - left_top[0]
+ h = right_bottom[1] - left_top[1]
+ if w > self.aspect_ratio * h:
+ h = w * 1.0 / self.aspect_ratio
+ elif w < self.aspect_ratio * h:
+ w = h * self.aspect_ratio
+ scale = np.array(
+ [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],
+ dtype=np.float32)
+ scale = scale * 1.5
+
+ return center, scale
+
+ def flip_joints(self, joints, joints_vis, width, matched_parts):
+ joints[:, 0] = width - joints[:, 0] - 1
+ for pair in matched_parts:
+ joints[pair[0], :], joints[pair[1], :] = \
+ joints[pair[1], :], joints[pair[0], :].copy()
+ joints_vis[pair[0], :], joints_vis[pair[1], :] = \
+ joints_vis[pair[1], :], joints_vis[pair[0], :].copy()
+
+ return joints * joints_vis, joints_vis
+
+ def __call__(self, records):
+ image = records['image']
+ joints = records['joints']
+ joints_vis = records['joints_vis']
+ c = records['center']
+ s = records['scale']
+ r = 0
+ if (np.sum(joints_vis[:, 0]) > self.num_joints_half_body and
+ np.random.rand() < self.prob_half_body):
+ c_half_body, s_half_body = self.halfbody_transform(joints,
+ joints_vis)
+ if c_half_body is not None and s_half_body is not None:
+ c, s = c_half_body, s_half_body
+ sf = self.scale
+ rf = self.rot
+ s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
+ r = np.clip(np.random.randn() * rf, -rf * 2,
+ rf * 2) if np.random.random() <= self.rot_prob else 0
+
+ if self.flip and np.random.random() <= 0.5:
+ image = image[:, ::-1, :]
+ joints, joints_vis = self.flip_joints(
+ joints, joints_vis, image.shape[1], self.flip_pairs)
+ c[0] = image.shape[1] - c[0] - 1
+ records['image'] = image
+ records['joints'] = joints
+ records['joints_vis'] = joints_vis
+ records['center'] = c
+ records['scale'] = s
+ records['rotate'] = r
+
+ return records
+
+
+@register_keypointop
+class AugmentationbyInformantionDropping(object):
+ """AID: Augmentation by Informantion Dropping. Please refer
+ to https://arxiv.org/abs/2008.07139
+
+ Args:
+ prob_cutout (float): The probability of the Cutout augmentation.
+ offset_factor (float): Offset factor of cutout center.
+ num_patch (int): Number of patches to be cutout.
+ records(dict): the dict contained the image and coords
+
+ Returns:
+ records (dict): contain the image and coords after tranformed
+
+ """
+
+ def __init__(self,
+ trainsize,
+ prob_cutout=0.0,
+ offset_factor=0.2,
+ num_patch=1):
+ self.prob_cutout = prob_cutout
+ self.offset_factor = offset_factor
+ self.num_patch = num_patch
+ self.trainsize = trainsize
+
+ def _cutout(self, img, joints, joints_vis):
+ height, width, _ = img.shape
+ img = img.reshape((height * width, -1))
+ feat_x_int = np.arange(0, width)
+ feat_y_int = np.arange(0, height)
+ feat_x_int, feat_y_int = np.meshgrid(feat_x_int, feat_y_int)
+ feat_x_int = feat_x_int.reshape((-1, ))
+ feat_y_int = feat_y_int.reshape((-1, ))
+ for _ in range(self.num_patch):
+ vis_idx, _ = np.where(joints_vis > 0)
+ occlusion_joint_id = np.random.choice(vis_idx)
+ center = joints[occlusion_joint_id, 0:2]
+ offset = np.random.randn(2) * self.trainsize[0] * self.offset_factor
+ center = center + offset
+ radius = np.random.uniform(0.1, 0.2) * self.trainsize[0]
+ x_offset = (center[0] - feat_x_int) / radius
+ y_offset = (center[1] - feat_y_int) / radius
+ dis = x_offset**2 + y_offset**2
+ keep_pos = np.where((dis <= 1) & (dis >= 0))[0]
+ img[keep_pos, :] = 0
+ img = img.reshape((height, width, -1))
+ return img
+
+ def __call__(self, records):
+ img = records['image']
+ joints = records['joints']
+ joints_vis = records['joints_vis']
+ if np.random.rand() < self.prob_cutout:
+ img = self._cutout(img, joints, joints_vis)
+ records['image'] = img
+ return records
+
+
+@register_keypointop
+class TopDownAffine(object):
+ """apply affine transform to image and coords
+
+ Args:
+ trainsize (list): [w, h], the standard size used to train
+ use_udp (bool): whether to use Unbiased Data Processing.
+ records(dict): the dict contained the image and coords
+
+ Returns:
+ records (dict): contain the image and coords after tranformed
+
+ """
+
+ def __init__(self, trainsize, use_udp=False):
+ self.trainsize = trainsize
+ self.use_udp = use_udp
+
+ def __call__(self, records):
+ image = records['image']
+ joints = records['joints']
+ joints_vis = records['joints_vis']
+ rot = records['rotate'] if "rotate" in records else 0
+ if self.use_udp:
+ trans = get_warp_matrix(
+ rot, records['center'] * 2.0,
+ [self.trainsize[0] - 1.0, self.trainsize[1] - 1.0],
+ records['scale'] * 200.0)
+ image = cv2.warpAffine(
+ image,
+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),
+ flags=cv2.INTER_LINEAR)
+ joints[:, 0:2] = warp_affine_joints(joints[:, 0:2].copy(), trans)
+ else:
+ trans = get_affine_transform(records['center'], records['scale'] *
+ 200, rot, self.trainsize)
+ image = cv2.warpAffine(
+ image,
+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),
+ flags=cv2.INTER_LINEAR)
+ for i in range(joints.shape[0]):
+ if joints_vis[i, 0] > 0.0:
+ joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)
+
+ records['image'] = image
+ records['joints'] = joints
+
+ return records
+
+
+@register_keypointop
+class TopDownEvalAffine(object):
+ """apply affine transform to image and coords
+
+ Args:
+ trainsize (list): [w, h], the standard size used to train
+ use_udp (bool): whether to use Unbiased Data Processing.
+ records(dict): the dict contained the image and coords
+
+ Returns:
+ records (dict): contain the image and coords after tranformed
+
+ """
+
+ def __init__(self, trainsize, use_udp=False):
+ self.trainsize = trainsize
+ self.use_udp = use_udp
+
+ def __call__(self, records):
+ image = records['image']
+ rot = 0
+ imshape = records['im_shape'][::-1]
+ center = imshape / 2.
+ scale = imshape
+
+ if self.use_udp:
+ trans = get_warp_matrix(
+ rot, center * 2.0,
+ [self.trainsize[0] - 1.0, self.trainsize[1] - 1.0], scale)
+ image = cv2.warpAffine(
+ image,
+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),
+ flags=cv2.INTER_LINEAR)
+ else:
+ trans = get_affine_transform(center, scale, rot, self.trainsize)
+ image = cv2.warpAffine(
+ image,
+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),
+ flags=cv2.INTER_LINEAR)
+ records['image'] = image
+
+ return records
+
+
+@register_keypointop
+class ToHeatmapsTopDown(object):
+ """to generate the gaussin heatmaps of keypoint for heatmap loss
+
+ Args:
+ hmsize (list): [w, h] output heatmap's size
+ sigma (float): the std of gaussin kernel genereted
+ records(dict): the dict contained the image and coords
+
+ Returns:
+ records (dict): contain the heatmaps used to heatmaploss
+
+ """
+
+ def __init__(self, hmsize, sigma):
+ super(ToHeatmapsTopDown, self).__init__()
+ self.hmsize = np.array(hmsize)
+ self.sigma = sigma
+
+ def __call__(self, records):
+ joints = records['joints']
+ joints_vis = records['joints_vis']
+ num_joints = joints.shape[0]
+ image_size = np.array(
+ [records['image'].shape[1], records['image'].shape[0]])
+ target_weight = np.ones((num_joints, 1), dtype=np.float32)
+ target_weight[:, 0] = joints_vis[:, 0]
+ target = np.zeros(
+ (num_joints, self.hmsize[1], self.hmsize[0]), dtype=np.float32)
+ tmp_size = self.sigma * 3
+ feat_stride = image_size / self.hmsize
+ for joint_id in range(num_joints):
+ mu_x = int(joints[joint_id][0] + 0.5) / feat_stride[0]
+ mu_y = int(joints[joint_id][1] + 0.5) / feat_stride[1]
+ # Check that any part of the gaussian is in-bounds
+ ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
+ br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
+ if ul[0] >= self.hmsize[0] or ul[1] >= self.hmsize[1] or br[
+ 0] < 0 or br[1] < 0:
+                # If not, zero this joint's weight and skip it
+ target_weight[joint_id] = 0
+ continue
+ # # Generate gaussian
+ size = 2 * tmp_size + 1
+ x = np.arange(0, size, 1, np.float32)
+ y = x[:, np.newaxis]
+ x0 = y0 = size // 2
+ # The gaussian is not normalized, we want the center value to equal 1
+ g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * self.sigma**2))
+
+ # Usable gaussian range
+ g_x = max(0, -ul[0]), min(br[0], self.hmsize[0]) - ul[0]
+ g_y = max(0, -ul[1]), min(br[1], self.hmsize[1]) - ul[1]
+ # Image range
+ img_x = max(0, ul[0]), min(br[0], self.hmsize[0])
+ img_y = max(0, ul[1]), min(br[1], self.hmsize[1])
+
+ v = target_weight[joint_id]
+ if v > 0.5:
+ target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[
+ 0]:g_y[1], g_x[0]:g_x[1]]
+ records['target'] = target
+ records['target_weight'] = target_weight
+ del records['joints'], records['joints_vis']
+
+ return records
+
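+# Illustrative sketch (assumed values): with hmsize = [48, 64] (w, h),
+# sigma = 2 and a 256x192 (h x w) input, feat_stride = (4, 4); a joint at
+# image coords (100, 60) lands at (mu_x, mu_y) = (25.0, 15.0) on the heatmap
+# and receives an unnormalized gaussian peaking at 1.0 there, while joints
+# whose window falls fully out of bounds just get target_weight = 0.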
+
+@register_keypointop
+class ToHeatmapsTopDown_DARK(object):
+ """to generate the gaussin heatmaps of keypoint for heatmap loss
+
+ Args:
+ hmsize (list): [w, h] output heatmap's size
+ sigma (float): the std of gaussin kernel genereted
+ records(dict): the dict contained the image and coords
+
+ Returns:
+ records (dict): contain the heatmaps used to heatmaploss
+
+ """
+
+ def __init__(self, hmsize, sigma):
+ super(ToHeatmapsTopDown_DARK, self).__init__()
+ self.hmsize = np.array(hmsize)
+ self.sigma = sigma
+
+ def __call__(self, records):
+ joints = records['joints']
+ joints_vis = records['joints_vis']
+ num_joints = joints.shape[0]
+ image_size = np.array(
+ [records['image'].shape[1], records['image'].shape[0]])
+ target_weight = np.ones((num_joints, 1), dtype=np.float32)
+ target_weight[:, 0] = joints_vis[:, 0]
+ target = np.zeros(
+ (num_joints, self.hmsize[1], self.hmsize[0]), dtype=np.float32)
+ tmp_size = self.sigma * 3
+ feat_stride = image_size / self.hmsize
+ for joint_id in range(num_joints):
+ mu_x = joints[joint_id][0] / feat_stride[0]
+ mu_y = joints[joint_id][1] / feat_stride[1]
+ # Check that any part of the gaussian is in-bounds
+ ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
+ br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
+ if ul[0] >= self.hmsize[0] or ul[1] >= self.hmsize[1] or br[
+ 0] < 0 or br[1] < 0:
+                # If not, zero this joint's weight and skip it
+ target_weight[joint_id] = 0
+ continue
+
+ x = np.arange(0, self.hmsize[0], 1, np.float32)
+ y = np.arange(0, self.hmsize[1], 1, np.float32)
+ y = y[:, np.newaxis]
+
+ v = target_weight[joint_id]
+ if v > 0.5:
+ target[joint_id] = np.exp(-(
+ (x - mu_x)**2 + (y - mu_y)**2) / (2 * self.sigma**2))
+ records['target'] = target
+ records['target_weight'] = target_weight
+ del records['joints'], records['joints_vis']
+
+ return records
+
+
+@register_keypointop
+class ToHeatmapsTopDown_UDP(object):
+ """to generate the gaussian heatmaps of keypoint for heatmap loss.
+ ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing
+ for Human Pose Estimation (CVPR 2020).
+
+ Args:
+ hmsize (list): [w, h] output heatmap's size
+ sigma (float): the std of gaussin kernel genereted
+ records(dict): the dict contained the image and coords
+
+ Returns:
+ records (dict): contain the heatmaps used to heatmaploss
+ """
+
+ def __init__(self, hmsize, sigma):
+ super(ToHeatmapsTopDown_UDP, self).__init__()
+ self.hmsize = np.array(hmsize)
+ self.sigma = sigma
+
+ def __call__(self, records):
+ joints = records['joints']
+ joints_vis = records['joints_vis']
+ num_joints = joints.shape[0]
+ image_size = np.array(
+ [records['image'].shape[1], records['image'].shape[0]])
+ target_weight = np.ones((num_joints, 1), dtype=np.float32)
+ target_weight[:, 0] = joints_vis[:, 0]
+ target = np.zeros(
+ (num_joints, self.hmsize[1], self.hmsize[0]), dtype=np.float32)
+ tmp_size = self.sigma * 3
+ size = 2 * tmp_size + 1
+ x = np.arange(0, size, 1, np.float32)
+ y = x[:, None]
+ feat_stride = (image_size - 1.0) / (self.hmsize - 1.0)
+ for joint_id in range(num_joints):
+ mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)
+ mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)
+ # Check that any part of the gaussian is in-bounds
+ ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
+ br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
+ if ul[0] >= self.hmsize[0] or ul[1] >= self.hmsize[1] or br[
+ 0] < 0 or br[1] < 0:
+                # If not, zero this joint's weight and skip it
+ target_weight[joint_id] = 0
+ continue
+
+ mu_x_ac = joints[joint_id][0] / feat_stride[0]
+ mu_y_ac = joints[joint_id][1] / feat_stride[1]
+ x0 = y0 = size // 2
+ x0 += mu_x_ac - mu_x
+ y0 += mu_y_ac - mu_y
+ g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * self.sigma**2))
+ # Usable gaussian range
+ g_x = max(0, -ul[0]), min(br[0], self.hmsize[0]) - ul[0]
+ g_y = max(0, -ul[1]), min(br[1], self.hmsize[1]) - ul[1]
+ # Image range
+ img_x = max(0, ul[0]), min(br[0], self.hmsize[0])
+ img_y = max(0, ul[1]), min(br[1], self.hmsize[1])
+
+ v = target_weight[joint_id]
+ if v > 0.5:
+ target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[
+ 0]:g_y[1], g_x[0]:g_x[1]]
+ records['target'] = target
+ records['target_weight'] = target_weight
+ del records['joints'], records['joints_vis']
+
+ return records
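+# Illustrative sketch (assumed values): unlike the plain top-down target, UDP
+# uses feat_stride = (image_size - 1) / (hmsize - 1) and shifts the gaussian
+# center inside the stamped window by the sub-pixel remainder
+# (mu_x_ac - mu_x, mu_y_ac - mu_y). E.g. with a hypothetical stride of 4.02,
+# a joint at x = 100.0 gives mu_x_ac ~= 24.88 and mu_x = 25, so the window's
+# gaussian center is offset by about -0.12 in x instead of being snapped.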
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/mot_operators.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/mot_operators.py
new file mode 100644
index 000000000..ef7d7be45
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/mot_operators.py
@@ -0,0 +1,627 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+try:
+ from collections.abc import Sequence
+except Exception:
+ from collections import Sequence
+from numbers import Integral
+
+import cv2
+import copy
+import numpy as np
+import random
+import math
+
+from .operators import BaseOperator, register_op
+from .batch_operators import Gt2TTFTarget
+from ppdet.modeling.bbox_utils import bbox_iou_np_expand
+from ppdet.utils.logger import setup_logger
+from .op_helper import gaussian_radius
+logger = setup_logger(__name__)
+
+__all__ = [
+ 'RGBReverse', 'LetterBoxResize', 'MOTRandomAffine', 'Gt2JDETargetThres',
+ 'Gt2JDETargetMax', 'Gt2FairMOTTarget'
+]
+
+
+@register_op
+class RGBReverse(BaseOperator):
+ """RGB to BGR, or BGR to RGB, sensitive to MOTRandomAffine
+ """
+
+ def __init__(self):
+ super(RGBReverse, self).__init__()
+
+ def apply(self, sample, context=None):
+ im = sample['image']
+ sample['image'] = np.ascontiguousarray(im[:, :, ::-1])
+ return sample
+
+
+@register_op
+class LetterBoxResize(BaseOperator):
+ def __init__(self, target_size):
+ """
+ Resize image to target size, convert normalized xywh to pixel xyxy
+ format ([x_center, y_center, width, height] -> [x0, y0, x1, y1]).
+ Args:
+ target_size (int|list): image target size.
+ """
+ super(LetterBoxResize, self).__init__()
+ if not isinstance(target_size, (Integral, Sequence)):
+ raise TypeError(
+ "Type of target_size is invalid. Must be Integer or List or Tuple, now is {}".
+ format(type(target_size)))
+ if isinstance(target_size, Integral):
+ target_size = [target_size, target_size]
+ self.target_size = target_size
+
+ def apply_image(self, img, height, width, color=(127.5, 127.5, 127.5)):
+        # letterbox: resize a rectangular image into a padded rectangle
+ shape = img.shape[:2] # [height, width]
+ ratio_h = float(height) / shape[0]
+ ratio_w = float(width) / shape[1]
+ ratio = min(ratio_h, ratio_w)
+ new_shape = (round(shape[1] * ratio),
+ round(shape[0] * ratio)) # [width, height]
+ padw = (width - new_shape[0]) / 2
+ padh = (height - new_shape[1]) / 2
+ top, bottom = round(padh - 0.1), round(padh + 0.1)
+ left, right = round(padw - 0.1), round(padw + 0.1)
+
+ img = cv2.resize(
+ img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
+ img = cv2.copyMakeBorder(
+ img, top, bottom, left, right, cv2.BORDER_CONSTANT,
+            value=color)  # padded rectangle
+ return img, ratio, padw, padh
+
+ def apply_bbox(self, bbox0, h, w, ratio, padw, padh):
+ bboxes = bbox0.copy()
+ bboxes[:, 0] = ratio * w * (bbox0[:, 0] - bbox0[:, 2] / 2) + padw
+ bboxes[:, 1] = ratio * h * (bbox0[:, 1] - bbox0[:, 3] / 2) + padh
+ bboxes[:, 2] = ratio * w * (bbox0[:, 0] + bbox0[:, 2] / 2) + padw
+ bboxes[:, 3] = ratio * h * (bbox0[:, 1] + bbox0[:, 3] / 2) + padh
+ return bboxes
+
+ def apply(self, sample, context=None):
+ """ Resize the image numpy.
+ """
+ im = sample['image']
+ h, w = sample['im_shape']
+ if not isinstance(im, np.ndarray):
+ raise TypeError("{}: image type is not numpy.".format(self))
+ if len(im.shape) != 3:
+ from PIL import UnidentifiedImageError
+ raise UnidentifiedImageError(
+ '{}: image is not 3-dimensional.'.format(self))
+
+ # apply image
+ height, width = self.target_size
+ img, ratio, padw, padh = self.apply_image(
+ im, height=height, width=width)
+
+ sample['image'] = img
+ new_shape = (round(h * ratio), round(w * ratio))
+ sample['im_shape'] = np.asarray(new_shape, dtype=np.float32)
+ sample['scale_factor'] = np.asarray([ratio, ratio], dtype=np.float32)
+
+ # apply bbox
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
+ sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], h, w, ratio,
+ padw, padh)
+ return sample
+
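+# Illustrative sketch (assumed values): letterboxing a 480x640 (h x w) image
+# to target_size = [608, 1088] picks ratio = min(608/480, 1088/640) ~= 1.267,
+# resizes to 811x608 (w x h), then pads ~138/139 gray columns left/right so
+# the aspect ratio is preserved; gt boxes are mapped from normalized xywh to
+# pixel xyxy with the same ratio and padding.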
+
+@register_op
+class MOTRandomAffine(BaseOperator):
+ """
+ Affine transform to image and coords to achieve the rotate, scale and
+ shift effect for training image.
+
+ Args:
+ degrees (list[2]): the rotate range to apply, transform range is [min, max]
+ translate (list[2]): the translate range to apply, transform range is [min, max]
+ scale (list[2]): the scale range to apply, transform range is [min, max]
+ shear (list[2]): the shear range to apply, transform range is [min, max]
+ borderValue (list[3]): value used in case of a constant border when appling
+ the perspective transformation
+ reject_outside (bool): reject warped bounding bboxes outside of image
+
+ Returns:
+ records(dict): contain the image and coords after tranformed
+
+ """
+
+ def __init__(self,
+ degrees=(-5, 5),
+ translate=(0.10, 0.10),
+ scale=(0.50, 1.20),
+ shear=(-2, 2),
+ borderValue=(127.5, 127.5, 127.5),
+ reject_outside=True):
+ super(MOTRandomAffine, self).__init__()
+ self.degrees = degrees
+ self.translate = translate
+ self.scale = scale
+ self.shear = shear
+ self.borderValue = borderValue
+ self.reject_outside = reject_outside
+
+ def apply(self, sample, context=None):
+ # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
+ border = 0 # width of added border (optional)
+
+ img = sample['image']
+ height, width = img.shape[0], img.shape[1]
+
+ # Rotation and Scale
+ R = np.eye(3)
+ a = random.random() * (self.degrees[1] - self.degrees[0]
+ ) + self.degrees[0]
+ s = random.random() * (self.scale[1] - self.scale[0]) + self.scale[0]
+ R[:2] = cv2.getRotationMatrix2D(
+ angle=a, center=(width / 2, height / 2), scale=s)
+
+ # Translation
+ T = np.eye(3)
+ T[0, 2] = (
+ random.random() * 2 - 1
+ ) * self.translate[0] * height + border # x translation (pixels)
+ T[1, 2] = (
+ random.random() * 2 - 1
+ ) * self.translate[1] * width + border # y translation (pixels)
+
+ # Shear
+ S = np.eye(3)
+ S[0, 1] = math.tan((random.random() *
+ (self.shear[1] - self.shear[0]) + self.shear[0]) *
+ math.pi / 180) # x shear (deg)
+ S[1, 0] = math.tan((random.random() *
+ (self.shear[1] - self.shear[0]) + self.shear[0]) *
+ math.pi / 180) # y shear (deg)
+
+        M = S @ T @ R  # Combined transform matrix. Order is important here!
+ imw = cv2.warpPerspective(
+ img,
+ M,
+ dsize=(width, height),
+ flags=cv2.INTER_LINEAR,
+ borderValue=self.borderValue) # BGR order borderValue
+
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
+ targets = sample['gt_bbox']
+ n = targets.shape[0]
+ points = targets.copy()
+ area0 = (points[:, 2] - points[:, 0]) * (
+ points[:, 3] - points[:, 1])
+
+ # warp points
+ xy = np.ones((n * 4, 3))
+ xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
+ n * 4, 2) # x1y1, x2y2, x1y2, x2y1
+            xy = (xy @ M.T)[:, :2].reshape(n, 8)
+
+ # create new boxes
+ x = xy[:, [0, 2, 4, 6]]
+ y = xy[:, [1, 3, 5, 7]]
+ xy = np.concatenate(
+ (x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
+
+ # apply angle-based reduction
+ radians = a * math.pi / 180
+ reduction = max(abs(math.sin(radians)), abs(math.cos(radians)))**0.5
+ x = (xy[:, 2] + xy[:, 0]) / 2
+ y = (xy[:, 3] + xy[:, 1]) / 2
+ w = (xy[:, 2] - xy[:, 0]) * reduction
+ h = (xy[:, 3] - xy[:, 1]) * reduction
+ xy = np.concatenate(
+ (x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
+
+ # reject warped points outside of image
+ if self.reject_outside:
+ np.clip(xy[:, 0], 0, width, out=xy[:, 0])
+ np.clip(xy[:, 2], 0, width, out=xy[:, 2])
+ np.clip(xy[:, 1], 0, height, out=xy[:, 1])
+ np.clip(xy[:, 3], 0, height, out=xy[:, 3])
+ w = xy[:, 2] - xy[:, 0]
+ h = xy[:, 3] - xy[:, 1]
+ area = w * h
+ ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
+ i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
+
+ if sum(i) > 0:
+ sample['gt_bbox'] = xy[i].astype(sample['gt_bbox'].dtype)
+ sample['gt_class'] = sample['gt_class'][i]
+ if 'difficult' in sample:
+ sample['difficult'] = sample['difficult'][i]
+ if 'gt_ide' in sample:
+ sample['gt_ide'] = sample['gt_ide'][i]
+ if 'is_crowd' in sample:
+ sample['is_crowd'] = sample['is_crowd'][i]
+ sample['image'] = imw
+ return sample
+ else:
+ return sample
+
+
+@register_op
+class Gt2JDETargetThres(BaseOperator):
+ __shared__ = ['num_classes']
+ """
+ Generate JDE targets by groud truth data when training
+ Args:
+ anchors (list): anchors of JDE model
+ anchor_masks (list): anchor_masks of JDE model
+ downsample_ratios (list): downsample ratios of JDE model
+ ide_thresh (float): thresh of identity, higher is groud truth
+ fg_thresh (float): thresh of foreground, higher is foreground
+ bg_thresh (float): thresh of background, lower is background
+ num_classes (int): number of classes
+ """
+
+ def __init__(self,
+ anchors,
+ anchor_masks,
+ downsample_ratios,
+ ide_thresh=0.5,
+ fg_thresh=0.5,
+ bg_thresh=0.4,
+ num_classes=1):
+ super(Gt2JDETargetThres, self).__init__()
+ self.anchors = anchors
+ self.anchor_masks = anchor_masks
+ self.downsample_ratios = downsample_ratios
+ self.ide_thresh = ide_thresh
+ self.fg_thresh = fg_thresh
+ self.bg_thresh = bg_thresh
+ self.num_classes = num_classes
+
+ def generate_anchor(self, nGh, nGw, anchor_hw):
+ nA = len(anchor_hw)
+ yy, xx = np.meshgrid(np.arange(nGh), np.arange(nGw))
+
+ mesh = np.stack([xx.T, yy.T], axis=0) # [2, nGh, nGw]
+ mesh = np.repeat(mesh[None, :], nA, axis=0) # [nA, 2, nGh, nGw]
+
+ anchor_offset_mesh = anchor_hw[:, :, None][:, :, :, None]
+ anchor_offset_mesh = np.repeat(anchor_offset_mesh, nGh, axis=-2)
+ anchor_offset_mesh = np.repeat(anchor_offset_mesh, nGw, axis=-1)
+
+ anchor_mesh = np.concatenate(
+ [mesh, anchor_offset_mesh], axis=1) # [nA, 4, nGh, nGw]
+ return anchor_mesh
+
+ def encode_delta(self, gt_box_list, fg_anchor_list):
+        px, py, pw, ph = fg_anchor_list[:, 0], fg_anchor_list[:, 1], \
+                         fg_anchor_list[:, 2], fg_anchor_list[:, 3]
+ gx, gy, gw, gh = gt_box_list[:, 0], gt_box_list[:, 1], \
+ gt_box_list[:, 2], gt_box_list[:, 3]
+ dx = (gx - px) / pw
+ dy = (gy - py) / ph
+ dw = np.log(gw / pw)
+ dh = np.log(gh / ph)
+ return np.stack([dx, dy, dw, dh], axis=1)
+
+ def pad_box(self, sample, num_max):
+ assert 'gt_bbox' in sample
+ bbox = sample['gt_bbox']
+ gt_num = len(bbox)
+ pad_bbox = np.zeros((num_max, 4), dtype=np.float32)
+ if gt_num > 0:
+ pad_bbox[:gt_num, :] = bbox[:gt_num, :]
+ sample['gt_bbox'] = pad_bbox
+ if 'gt_score' in sample:
+ pad_score = np.zeros((num_max, ), dtype=np.float32)
+ if gt_num > 0:
+ pad_score[:gt_num] = sample['gt_score'][:gt_num, 0]
+ sample['gt_score'] = pad_score
+ if 'difficult' in sample:
+ pad_diff = np.zeros((num_max, ), dtype=np.int32)
+ if gt_num > 0:
+ pad_diff[:gt_num] = sample['difficult'][:gt_num, 0]
+ sample['difficult'] = pad_diff
+ if 'is_crowd' in sample:
+ pad_crowd = np.zeros((num_max, ), dtype=np.int32)
+ if gt_num > 0:
+ pad_crowd[:gt_num] = sample['is_crowd'][:gt_num, 0]
+ sample['is_crowd'] = pad_crowd
+ if 'gt_ide' in sample:
+ pad_ide = np.zeros((num_max, ), dtype=np.int32)
+ if gt_num > 0:
+ pad_ide[:gt_num] = sample['gt_ide'][:gt_num, 0]
+ sample['gt_ide'] = pad_ide
+ return sample
+
+ def __call__(self, samples, context=None):
+        assert len(self.anchor_masks) == len(self.downsample_ratios), \
+            "'anchor_masks' and 'downsample_ratios' should have the same length."
+ h, w = samples[0]['image'].shape[1:3]
+
+ num_max = 0
+ for sample in samples:
+ num_max = max(num_max, len(sample['gt_bbox']))
+
+ for sample in samples:
+ gt_bbox = sample['gt_bbox']
+ gt_ide = sample['gt_ide']
+ for i, (anchor_hw, downsample_ratio
+ ) in enumerate(zip(self.anchors, self.downsample_ratios)):
+ anchor_hw = np.array(
+ anchor_hw, dtype=np.float32) / downsample_ratio
+ nA = len(anchor_hw)
+ nGh, nGw = int(h / downsample_ratio), int(w / downsample_ratio)
+ tbox = np.zeros((nA, nGh, nGw, 4), dtype=np.float32)
+ tconf = np.zeros((nA, nGh, nGw), dtype=np.float32)
+ tid = -np.ones((nA, nGh, nGw, 1), dtype=np.float32)
+
+ gxy, gwh = gt_bbox[:, 0:2].copy(), gt_bbox[:, 2:4].copy()
+ gxy[:, 0] = gxy[:, 0] * nGw
+ gxy[:, 1] = gxy[:, 1] * nGh
+ gwh[:, 0] = gwh[:, 0] * nGw
+ gwh[:, 1] = gwh[:, 1] * nGh
+ gxy[:, 0] = np.clip(gxy[:, 0], 0, nGw - 1)
+ gxy[:, 1] = np.clip(gxy[:, 1], 0, nGh - 1)
+ tboxes = np.concatenate([gxy, gwh], axis=1)
+
+ anchor_mesh = self.generate_anchor(nGh, nGw, anchor_hw)
+
+ anchor_list = np.transpose(anchor_mesh,
+ (0, 2, 3, 1)).reshape(-1, 4)
+ iou_pdist = bbox_iou_np_expand(
+ anchor_list, tboxes, x1y1x2y2=False)
+
+ iou_max = np.max(iou_pdist, axis=1)
+ max_gt_index = np.argmax(iou_pdist, axis=1)
+
+ iou_map = iou_max.reshape(nA, nGh, nGw)
+ gt_index_map = max_gt_index.reshape(nA, nGh, nGw)
+
+ id_index = iou_map > self.ide_thresh
+ fg_index = iou_map > self.fg_thresh
+ bg_index = iou_map < self.bg_thresh
+ ign_index = (iou_map < self.fg_thresh) * (
+ iou_map > self.bg_thresh)
+ tconf[fg_index] = 1
+ tconf[bg_index] = 0
+ tconf[ign_index] = -1
+
+ gt_index = gt_index_map[fg_index]
+ gt_box_list = tboxes[gt_index]
+ gt_id_list = gt_ide[gt_index_map[id_index]]
+
+ if np.sum(fg_index) > 0:
+ tid[id_index] = gt_id_list
+
+ fg_anchor_list = anchor_list.reshape(nA, nGh, nGw,
+ 4)[fg_index]
+ delta_target = self.encode_delta(gt_box_list,
+ fg_anchor_list)
+ tbox[fg_index] = delta_target
+
+ sample['tbox{}'.format(i)] = tbox
+ sample['tconf{}'.format(i)] = tconf
+ sample['tide{}'.format(i)] = tid
+ sample.pop('gt_class')
+ sample = self.pad_box(sample, num_max)
+ return samples
+
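+# Illustrative sketch (assumed values) of Gt2JDETargetThres.encode_delta: a
+# foreground anchor (px, py, pw, ph) = (10, 10, 4, 8) matched to a gt box
+# (gx, gy, gw, gh) = (12, 11, 8, 8) produces the regression target
+#
+#   dx = (12 - 10) / 4 = 0.5      dy = (11 - 10) / 8 = 0.125
+#   dw = log(8 / 4) ~= 0.693      dh = log(8 / 8) = 0.0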
+
+@register_op
+class Gt2JDETargetMax(BaseOperator):
+ __shared__ = ['num_classes']
+ """
+ Generate JDE targets by groud truth data when evaluating
+ Args:
+ anchors (list): anchors of JDE model
+ anchor_masks (list): anchor_masks of JDE model
+ downsample_ratios (list): downsample ratios of JDE model
+ max_iou_thresh (float): iou thresh for high quality anchor
+ num_classes (int): number of classes
+ """
+
+ def __init__(self,
+ anchors,
+ anchor_masks,
+ downsample_ratios,
+ max_iou_thresh=0.60,
+ num_classes=1):
+ super(Gt2JDETargetMax, self).__init__()
+ self.anchors = anchors
+ self.anchor_masks = anchor_masks
+ self.downsample_ratios = downsample_ratios
+ self.max_iou_thresh = max_iou_thresh
+ self.num_classes = num_classes
+
+ def __call__(self, samples, context=None):
+        assert len(self.anchor_masks) == len(self.downsample_ratios), \
+            "'anchor_masks' and 'downsample_ratios' should have the same length."
+ h, w = samples[0]['image'].shape[1:3]
+ for sample in samples:
+ gt_bbox = sample['gt_bbox']
+ gt_ide = sample['gt_ide']
+ for i, (anchor_hw, downsample_ratio
+ ) in enumerate(zip(self.anchors, self.downsample_ratios)):
+ anchor_hw = np.array(
+ anchor_hw, dtype=np.float32) / downsample_ratio
+ nA = len(anchor_hw)
+ nGh, nGw = int(h / downsample_ratio), int(w / downsample_ratio)
+ tbox = np.zeros((nA, nGh, nGw, 4), dtype=np.float32)
+ tconf = np.zeros((nA, nGh, nGw), dtype=np.float32)
+ tid = -np.ones((nA, nGh, nGw, 1), dtype=np.float32)
+
+ gxy, gwh = gt_bbox[:, 0:2].copy(), gt_bbox[:, 2:4].copy()
+ gxy[:, 0] = gxy[:, 0] * nGw
+ gxy[:, 1] = gxy[:, 1] * nGh
+ gwh[:, 0] = gwh[:, 0] * nGw
+ gwh[:, 1] = gwh[:, 1] * nGh
+ gi = np.clip(gxy[:, 0], 0, nGw - 1).astype(int)
+ gj = np.clip(gxy[:, 1], 0, nGh - 1).astype(int)
+
+ # iou of targets-anchors (using wh only)
+ box1 = gwh
+ box2 = anchor_hw[:, None, :]
+ inter_area = np.minimum(box1, box2).prod(2)
+ iou = inter_area / (
+ box1.prod(1) + box2.prod(2) - inter_area + 1e-16)
+
+ # Select best iou_pred and anchor
+ iou_best = iou.max(0) # best anchor [0-2] for each target
+ a = np.argmax(iou, axis=0)
+
+ # Select best unique target-anchor combinations
+ iou_order = np.argsort(-iou_best) # best to worst
+
+ # Unique anchor selection
+ u = np.stack((gi, gj, a), 0)[:, iou_order]
+ _, first_unique = np.unique(u, axis=1, return_index=True)
+ mask = iou_order[first_unique]
+ # best anchor must share significant commonality (iou) with target
+ # TODO: examine arbitrary threshold
+ idx = mask[iou_best[mask] > self.max_iou_thresh]
+
+ if len(idx) > 0:
+ a_i, gj_i, gi_i = a[idx], gj[idx], gi[idx]
+ t_box = gt_bbox[idx]
+ t_id = gt_ide[idx]
+ if len(t_box.shape) == 1:
+ t_box = t_box.reshape(1, 4)
+
+ gxy, gwh = t_box[:, 0:2].copy(), t_box[:, 2:4].copy()
+ gxy[:, 0] = gxy[:, 0] * nGw
+ gxy[:, 1] = gxy[:, 1] * nGh
+ gwh[:, 0] = gwh[:, 0] * nGw
+ gwh[:, 1] = gwh[:, 1] * nGh
+
+ # XY coordinates
+ tbox[:, :, :, 0:2][a_i, gj_i, gi_i] = gxy - gxy.astype(int)
+ # Width and height in yolo method
+ tbox[:, :, :, 2:4][a_i, gj_i, gi_i] = np.log(gwh /
+ anchor_hw[a_i])
+ tconf[a_i, gj_i, gi_i] = 1
+ tid[a_i, gj_i, gi_i] = t_id
+
+ sample['tbox{}'.format(i)] = tbox
+ sample['tconf{}'.format(i)] = tconf
+ sample['tide{}'.format(i)] = tid
+
+
+class Gt2FairMOTTarget(Gt2TTFTarget):
+    """
+    Generate FairMOT targets from ground truth data.
+    Differences between Gt2FairMOTTarget and Gt2TTFTarget are:
+        1. the gaussian kernel radius used to generate the heatmap.
+        2. the targets needed during training.
+
+    Args:
+        num_classes(int): the number of classes.
+        down_ratio(int): the down ratio from images to heatmap, 4 by default.
+        max_objs(int): the maximum number of ground truth objects in an image, 500 by default.
+    """
+    __shared__ = ['num_classes']
+
+ def __init__(self, num_classes=1, down_ratio=4, max_objs=500):
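+        # Deliberately call super(Gt2TTFTarget, ...) so that
+        # BaseOperator.__init__ runs while Gt2TTFTarget.__init__ (which has a
+        # different signature) is skipped.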
+ super(Gt2TTFTarget, self).__init__()
+ self.down_ratio = down_ratio
+ self.num_classes = num_classes
+ self.max_objs = max_objs
+
+ def __call__(self, samples, context=None):
+ for b_id, sample in enumerate(samples):
+ output_h = sample['image'].shape[1] // self.down_ratio
+ output_w = sample['image'].shape[2] // self.down_ratio
+
+ heatmap = np.zeros(
+ (self.num_classes, output_h, output_w), dtype='float32')
+ bbox_size = np.zeros((self.max_objs, 4), dtype=np.float32)
+ center_offset = np.zeros((self.max_objs, 2), dtype=np.float32)
+ index = np.zeros((self.max_objs, ), dtype=np.int64)
+ index_mask = np.zeros((self.max_objs, ), dtype=np.int32)
+ reid = np.zeros((self.max_objs, ), dtype=np.int64)
+ bbox_xys = np.zeros((self.max_objs, 4), dtype=np.float32)
+ if self.num_classes > 1:
+ # each category corresponds to a set of track ids
+ cls_tr_ids = np.zeros(
+ (self.num_classes, output_h, output_w), dtype=np.int64)
+ cls_id_map = np.full((output_h, output_w), -1, dtype=np.int64)
+
+ gt_bbox = sample['gt_bbox']
+ gt_class = sample['gt_class']
+ gt_ide = sample['gt_ide']
+
+ for k in range(len(gt_bbox)):
+ cls_id = gt_class[k][0]
+ bbox = gt_bbox[k]
+ ide = gt_ide[k][0]
+ bbox[[0, 2]] = bbox[[0, 2]] * output_w
+ bbox[[1, 3]] = bbox[[1, 3]] * output_h
+ bbox_amodal = copy.deepcopy(bbox)
+ bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.
+ bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.
+ bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]
+ bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]
+ bbox[0] = np.clip(bbox[0], 0, output_w - 1)
+ bbox[1] = np.clip(bbox[1], 0, output_h - 1)
+ h = bbox[3]
+ w = bbox[2]
+
+ bbox_xy = copy.deepcopy(bbox)
+ bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2
+ bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2
+ bbox_xy[2] = bbox_xy[0] + bbox_xy[2]
+ bbox_xy[3] = bbox_xy[1] + bbox_xy[3]
+
+ if h > 0 and w > 0:
+ radius = gaussian_radius((math.ceil(h), math.ceil(w)), 0.7)
+ radius = max(0, int(radius))
+ ct = np.array([bbox[0], bbox[1]], dtype=np.float32)
+ ct_int = ct.astype(np.int32)
+ self.draw_truncate_gaussian(heatmap[cls_id], ct_int, radius,
+ radius)
+ bbox_size[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \
+ bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]
+
+ index[k] = ct_int[1] * output_w + ct_int[0]
+ center_offset[k] = ct - ct_int
+ index_mask[k] = 1
+ reid[k] = ide
+ bbox_xys[k] = bbox_xy
+ if self.num_classes > 1:
+ cls_id_map[ct_int[1], ct_int[0]] = cls_id
+ cls_tr_ids[cls_id][ct_int[1]][ct_int[0]] = ide - 1
+                        # track ids start from 0
+
+ sample['heatmap'] = heatmap
+ sample['index'] = index
+ sample['offset'] = center_offset
+ sample['size'] = bbox_size
+ sample['index_mask'] = index_mask
+ sample['reid'] = reid
+ if self.num_classes > 1:
+ sample['cls_id_map'] = cls_id_map
+ sample['cls_tr_ids'] = cls_tr_ids
+ sample['bbox_xys'] = bbox_xys
+ sample.pop('is_crowd', None)
+ sample.pop('difficult', None)
+ sample.pop('gt_class', None)
+ sample.pop('gt_bbox', None)
+ sample.pop('gt_score', None)
+ sample.pop('gt_ide', None)
+ return samples
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/op_helper.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/op_helper.py
new file mode 100644
index 000000000..6c400306d
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/op_helper.py
@@ -0,0 +1,494 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# this file contains helper methods for BBOX processing
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import random
+import math
+import cv2
+
+
+def meet_emit_constraint(src_bbox, sample_bbox):
+ center_x = (src_bbox[2] + src_bbox[0]) / 2
+ center_y = (src_bbox[3] + src_bbox[1]) / 2
+ if center_x >= sample_bbox[0] and \
+ center_x <= sample_bbox[2] and \
+ center_y >= sample_bbox[1] and \
+ center_y <= sample_bbox[3]:
+ return True
+ return False
+
+
+def clip_bbox(src_bbox):
+ src_bbox[0] = max(min(src_bbox[0], 1.0), 0.0)
+ src_bbox[1] = max(min(src_bbox[1], 1.0), 0.0)
+ src_bbox[2] = max(min(src_bbox[2], 1.0), 0.0)
+ src_bbox[3] = max(min(src_bbox[3], 1.0), 0.0)
+ return src_bbox
+
+
+def bbox_area(src_bbox):
+ if src_bbox[2] < src_bbox[0] or src_bbox[3] < src_bbox[1]:
+ return 0.
+ else:
+ width = src_bbox[2] - src_bbox[0]
+ height = src_bbox[3] - src_bbox[1]
+ return width * height
+
+
+def is_overlap(object_bbox, sample_bbox):
+ if object_bbox[0] >= sample_bbox[2] or \
+ object_bbox[2] <= sample_bbox[0] or \
+ object_bbox[1] >= sample_bbox[3] or \
+ object_bbox[3] <= sample_bbox[1]:
+ return False
+ else:
+ return True
+
+
+def filter_and_process(sample_bbox, bboxes, labels, scores=None,
+ keypoints=None):
+ new_bboxes = []
+ new_labels = []
+ new_scores = []
+ new_keypoints = []
+ new_kp_ignore = []
+ for i in range(len(bboxes)):
+ new_bbox = [0, 0, 0, 0]
+ obj_bbox = [bboxes[i][0], bboxes[i][1], bboxes[i][2], bboxes[i][3]]
+ if not meet_emit_constraint(obj_bbox, sample_bbox):
+ continue
+ if not is_overlap(obj_bbox, sample_bbox):
+ continue
+ sample_width = sample_bbox[2] - sample_bbox[0]
+ sample_height = sample_bbox[3] - sample_bbox[1]
+ new_bbox[0] = (obj_bbox[0] - sample_bbox[0]) / sample_width
+ new_bbox[1] = (obj_bbox[1] - sample_bbox[1]) / sample_height
+ new_bbox[2] = (obj_bbox[2] - sample_bbox[0]) / sample_width
+ new_bbox[3] = (obj_bbox[3] - sample_bbox[1]) / sample_height
+ new_bbox = clip_bbox(new_bbox)
+ if bbox_area(new_bbox) > 0:
+ new_bboxes.append(new_bbox)
+ new_labels.append([labels[i][0]])
+ if scores is not None:
+ new_scores.append([scores[i][0]])
+ if keypoints is not None:
+ sample_keypoint = keypoints[0][i]
+ for j in range(len(sample_keypoint)):
+ kp_len = sample_height if j % 2 else sample_width
+ sample_coord = sample_bbox[1] if j % 2 else sample_bbox[0]
+ sample_keypoint[j] = (
+ sample_keypoint[j] - sample_coord) / kp_len
+ sample_keypoint[j] = max(min(sample_keypoint[j], 1.0), 0.0)
+ new_keypoints.append(sample_keypoint)
+ new_kp_ignore.append(keypoints[1][i])
+
+ bboxes = np.array(new_bboxes)
+ labels = np.array(new_labels)
+ scores = np.array(new_scores)
+ if keypoints is not None:
+ keypoints = np.array(new_keypoints)
+ new_kp_ignore = np.array(new_kp_ignore)
+ return bboxes, labels, scores, (keypoints, new_kp_ignore)
+ return bboxes, labels, scores
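+
+# Worked example (illustrative): filter_and_process re-expresses each kept
+# box in the crop's own normalized frame. With sample_bbox =
+# [0.25, 0.25, 0.75, 0.75] and a gt box [0.3, 0.3, 0.5, 0.5], the output is
+# [(0.3-0.25)/0.5, (0.3-0.25)/0.5, (0.5-0.25)/0.5, (0.5-0.25)/0.5]
+# = [0.1, 0.1, 0.5, 0.5]; labels/scores/keypoints are filtered alongside.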
+
+
+def bbox_area_sampling(bboxes, labels, scores, target_size, min_size):
+ new_bboxes = []
+ new_labels = []
+ new_scores = []
+ for i, bbox in enumerate(bboxes):
+ w = float((bbox[2] - bbox[0]) * target_size)
+ h = float((bbox[3] - bbox[1]) * target_size)
+ if w * h < float(min_size * min_size):
+ continue
+ else:
+ new_bboxes.append(bbox)
+ new_labels.append(labels[i])
+ if scores is not None and scores.size != 0:
+ new_scores.append(scores[i])
+ bboxes = np.array(new_bboxes)
+ labels = np.array(new_labels)
+ scores = np.array(new_scores)
+ return bboxes, labels, scores
+
+
+def generate_sample_bbox(sampler):
+ scale = np.random.uniform(sampler[2], sampler[3])
+ aspect_ratio = np.random.uniform(sampler[4], sampler[5])
+ aspect_ratio = max(aspect_ratio, (scale**2.0))
+ aspect_ratio = min(aspect_ratio, 1 / (scale**2.0))
+ bbox_width = scale * (aspect_ratio**0.5)
+ bbox_height = scale / (aspect_ratio**0.5)
+ xmin_bound = 1 - bbox_width
+ ymin_bound = 1 - bbox_height
+ xmin = np.random.uniform(0, xmin_bound)
+ ymin = np.random.uniform(0, ymin_bound)
+ xmax = xmin + bbox_width
+ ymax = ymin + bbox_height
+ sampled_bbox = [xmin, ymin, xmax, ymax]
+ return sampled_bbox
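+
+# Sampler layout note (mirrors the CropWithSampling docstring in
+# operators.py): sampler = [max_sample, max_trial, min_scale, max_scale,
+# min_aspect_ratio, max_aspect_ratio, min_overlap, max_overlap]; only
+# indices 2-5 are used here. Clamping aspect_ratio to
+# [scale**2, 1/scale**2] keeps both bbox_width and bbox_height <= 1.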
+
+
+def generate_sample_bbox_square(sampler, image_width, image_height):
+ scale = np.random.uniform(sampler[2], sampler[3])
+ aspect_ratio = np.random.uniform(sampler[4], sampler[5])
+ aspect_ratio = max(aspect_ratio, (scale**2.0))
+ aspect_ratio = min(aspect_ratio, 1 / (scale**2.0))
+ bbox_width = scale * (aspect_ratio**0.5)
+ bbox_height = scale / (aspect_ratio**0.5)
+ if image_height < image_width:
+ bbox_width = bbox_height * image_height / image_width
+ else:
+ bbox_height = bbox_width * image_width / image_height
+ xmin_bound = 1 - bbox_width
+ ymin_bound = 1 - bbox_height
+ xmin = np.random.uniform(0, xmin_bound)
+ ymin = np.random.uniform(0, ymin_bound)
+ xmax = xmin + bbox_width
+ ymax = ymin + bbox_height
+ sampled_bbox = [xmin, ymin, xmax, ymax]
+ return sampled_bbox
+
+
+def data_anchor_sampling(bbox_labels, image_width, image_height, scale_array,
+ resize_width):
+ num_gt = len(bbox_labels)
+ # np.random.randint range: [low, high)
+ rand_idx = np.random.randint(0, num_gt) if num_gt != 0 else 0
+
+ if num_gt != 0:
+ norm_xmin = bbox_labels[rand_idx][0]
+ norm_ymin = bbox_labels[rand_idx][1]
+ norm_xmax = bbox_labels[rand_idx][2]
+ norm_ymax = bbox_labels[rand_idx][3]
+
+ xmin = norm_xmin * image_width
+ ymin = norm_ymin * image_height
+ wid = image_width * (norm_xmax - norm_xmin)
+ hei = image_height * (norm_ymax - norm_ymin)
+ range_size = 0
+
+ area = wid * hei
+ for scale_ind in range(0, len(scale_array) - 1):
+ if area > scale_array[scale_ind] ** 2 and area < \
+ scale_array[scale_ind + 1] ** 2:
+ range_size = scale_ind + 1
+ break
+
+ if area > scale_array[len(scale_array) - 2]**2:
+ range_size = len(scale_array) - 2
+
+ scale_choose = 0.0
+ if range_size == 0:
+ rand_idx_size = 0
+ else:
+ # np.random.randint range: [low, high)
+ rng_rand_size = np.random.randint(0, range_size + 1)
+ rand_idx_size = rng_rand_size % (range_size + 1)
+
+ if rand_idx_size == range_size:
+ min_resize_val = scale_array[rand_idx_size] / 2.0
+ max_resize_val = min(2.0 * scale_array[rand_idx_size],
+ 2 * math.sqrt(wid * hei))
+ scale_choose = random.uniform(min_resize_val, max_resize_val)
+ else:
+ min_resize_val = scale_array[rand_idx_size] / 2.0
+ max_resize_val = 2.0 * scale_array[rand_idx_size]
+ scale_choose = random.uniform(min_resize_val, max_resize_val)
+
+ sample_bbox_size = wid * resize_width / scale_choose
+
+ w_off_orig = 0.0
+ h_off_orig = 0.0
+ if sample_bbox_size < max(image_height, image_width):
+ if wid <= sample_bbox_size:
+ w_off_orig = np.random.uniform(xmin + wid - sample_bbox_size,
+ xmin)
+ else:
+ w_off_orig = np.random.uniform(xmin,
+ xmin + wid - sample_bbox_size)
+
+ if hei <= sample_bbox_size:
+ h_off_orig = np.random.uniform(ymin + hei - sample_bbox_size,
+ ymin)
+ else:
+ h_off_orig = np.random.uniform(ymin,
+ ymin + hei - sample_bbox_size)
+
+ else:
+ w_off_orig = np.random.uniform(image_width - sample_bbox_size, 0.0)
+ h_off_orig = np.random.uniform(image_height - sample_bbox_size, 0.0)
+
+ w_off_orig = math.floor(w_off_orig)
+ h_off_orig = math.floor(h_off_orig)
+
+ # Figure out top left coordinates.
+ w_off = float(w_off_orig / image_width)
+ h_off = float(h_off_orig / image_height)
+
+ sampled_bbox = [
+ w_off, h_off, w_off + float(sample_bbox_size / image_width),
+ h_off + float(sample_bbox_size / image_height)
+ ]
+ return sampled_bbox
+ else:
+ return 0
+
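+# Editorial note (assumption): this appears to follow the
+# data-anchor-sampling scheme from PyramidBox: pick one gt box at random,
+# snap its area to a bucket in scale_array, then derive a square crop so
+# that, after resizing to resize_width, the box lands near the chosen
+# anchor scale. When there are no gt boxes a bare 0 is returned, which
+# callers test with `sample_bbox == 0`.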
+
+def jaccard_overlap(sample_bbox, object_bbox):
+ if sample_bbox[0] >= object_bbox[2] or \
+ sample_bbox[2] <= object_bbox[0] or \
+ sample_bbox[1] >= object_bbox[3] or \
+ sample_bbox[3] <= object_bbox[1]:
+ return 0
+ intersect_xmin = max(sample_bbox[0], object_bbox[0])
+ intersect_ymin = max(sample_bbox[1], object_bbox[1])
+ intersect_xmax = min(sample_bbox[2], object_bbox[2])
+ intersect_ymax = min(sample_bbox[3], object_bbox[3])
+ intersect_size = (intersect_xmax - intersect_xmin) * (
+ intersect_ymax - intersect_ymin)
+ sample_bbox_size = bbox_area(sample_bbox)
+ object_bbox_size = bbox_area(object_bbox)
+ overlap = intersect_size / (
+ sample_bbox_size + object_bbox_size - intersect_size)
+ return overlap
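+
+# Quick check (illustrative): jaccard_overlap([0., 0., .5, .5],
+# [.25, .25, .75, .75]) -> intersection 0.0625, union 0.4375,
+# IoU = 1/7 (~0.143).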
+
+
+def intersect_bbox(bbox1, bbox2):
+ if bbox2[0] > bbox1[2] or bbox2[2] < bbox1[0] or \
+ bbox2[1] > bbox1[3] or bbox2[3] < bbox1[1]:
+ intersection_box = [0.0, 0.0, 0.0, 0.0]
+ else:
+ intersection_box = [
+ max(bbox1[0], bbox2[0]), max(bbox1[1], bbox2[1]),
+ min(bbox1[2], bbox2[2]), min(bbox1[3], bbox2[3])
+ ]
+ return intersection_box
+
+
+def bbox_coverage(bbox1, bbox2):
+ inter_box = intersect_bbox(bbox1, bbox2)
+ intersect_size = bbox_area(inter_box)
+
+ if intersect_size > 0:
+ bbox1_size = bbox_area(bbox1)
+ return intersect_size / bbox1_size
+ else:
+ return 0.
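+
+# Note: coverage is asymmetric -- it is the fraction of bbox1 covered by
+# bbox2. E.g. bbox_coverage([0, 0, .2, .2], [0, 0, 1, 1]) == 1.0 while
+# bbox_coverage([0, 0, 1, 1], [0, 0, .2, .2]) == 0.04.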
+
+
+def satisfy_sample_constraint(sampler,
+ sample_bbox,
+ gt_bboxes,
+ satisfy_all=False):
+ if sampler[6] == 0 and sampler[7] == 0:
+ return True
+ satisfied = []
+ for i in range(len(gt_bboxes)):
+ object_bbox = [
+ gt_bboxes[i][0], gt_bboxes[i][1], gt_bboxes[i][2], gt_bboxes[i][3]
+ ]
+ overlap = jaccard_overlap(sample_bbox, object_bbox)
+ if sampler[6] != 0 and \
+ overlap < sampler[6]:
+ satisfied.append(False)
+ continue
+ if sampler[7] != 0 and \
+ overlap > sampler[7]:
+ satisfied.append(False)
+ continue
+ satisfied.append(True)
+ if not satisfy_all:
+ return True
+
+ if satisfy_all:
+ return np.all(satisfied)
+ else:
+ return False
+
+
+def satisfy_sample_constraint_coverage(sampler, sample_bbox, gt_bboxes):
+ if sampler[6] == 0 and sampler[7] == 0:
+ has_jaccard_overlap = False
+ else:
+ has_jaccard_overlap = True
+ if sampler[8] == 0 and sampler[9] == 0:
+ has_object_coverage = False
+ else:
+ has_object_coverage = True
+
+ if not has_jaccard_overlap and not has_object_coverage:
+ return True
+ found = False
+ for i in range(len(gt_bboxes)):
+ object_bbox = [
+ gt_bboxes[i][0], gt_bboxes[i][1], gt_bboxes[i][2], gt_bboxes[i][3]
+ ]
+ if has_jaccard_overlap:
+ overlap = jaccard_overlap(sample_bbox, object_bbox)
+ if sampler[6] != 0 and \
+ overlap < sampler[6]:
+ continue
+ if sampler[7] != 0 and \
+ overlap > sampler[7]:
+ continue
+ found = True
+ if has_object_coverage:
+ object_coverage = bbox_coverage(object_bbox, sample_bbox)
+ if sampler[8] != 0 and \
+ object_coverage < sampler[8]:
+ continue
+ if sampler[9] != 0 and \
+ object_coverage > sampler[9]:
+ continue
+ found = True
+ if found:
+ return True
+ return found
+
+
+def crop_image_sampling(img, sample_bbox, image_width, image_height,
+ target_size):
+ # no clipping here
+ xmin = int(sample_bbox[0] * image_width)
+ xmax = int(sample_bbox[2] * image_width)
+ ymin = int(sample_bbox[1] * image_height)
+ ymax = int(sample_bbox[3] * image_height)
+
+ w_off = xmin
+ h_off = ymin
+ width = xmax - xmin
+ height = ymax - ymin
+ cross_xmin = max(0.0, float(w_off))
+ cross_ymin = max(0.0, float(h_off))
+ cross_xmax = min(float(w_off + width - 1.0), float(image_width))
+ cross_ymax = min(float(h_off + height - 1.0), float(image_height))
+ cross_width = cross_xmax - cross_xmin
+ cross_height = cross_ymax - cross_ymin
+
+ roi_xmin = 0 if w_off >= 0 else abs(w_off)
+ roi_ymin = 0 if h_off >= 0 else abs(h_off)
+ roi_width = cross_width
+ roi_height = cross_height
+
+ roi_y1 = int(roi_ymin)
+ roi_y2 = int(roi_ymin + roi_height)
+ roi_x1 = int(roi_xmin)
+ roi_x2 = int(roi_xmin + roi_width)
+
+ cross_y1 = int(cross_ymin)
+ cross_y2 = int(cross_ymin + cross_height)
+ cross_x1 = int(cross_xmin)
+ cross_x2 = int(cross_xmin + cross_width)
+
+ sample_img = np.zeros((height, width, 3))
+ sample_img[roi_y1: roi_y2, roi_x1: roi_x2] = \
+ img[cross_y1: cross_y2, cross_x1: cross_x2]
+
+ sample_img = cv2.resize(
+ sample_img, (target_size, target_size), interpolation=cv2.INTER_AREA)
+
+ return sample_img
+
+
+def is_poly(segm):
+ assert isinstance(segm, (list, dict)), \
+ "Invalid segm type: {}".format(type(segm))
+ return isinstance(segm, list)
+
+
+def gaussian_radius(bbox_size, min_overlap):
+ height, width = bbox_size
+
+ a1 = 1
+ b1 = (height + width)
+ c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
+ sq1 = np.sqrt(b1**2 - 4 * a1 * c1)
+ radius1 = (b1 + sq1) / (2 * a1)
+
+ a2 = 4
+ b2 = 2 * (height + width)
+ c2 = (1 - min_overlap) * width * height
+ sq2 = np.sqrt(b2**2 - 4 * a2 * c2)
+ radius2 = (b2 + sq2) / 2
+
+ a3 = 4 * min_overlap
+ b3 = -2 * min_overlap * (height + width)
+ c3 = (min_overlap - 1) * width * height
+ sq3 = np.sqrt(b3**2 - 4 * a3 * c3)
+ radius3 = (b3 + sq3) / 2
+ return min(radius1, radius2, radius3)
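+
+# Note: this mirrors the widely copied CornerNet gaussian_radius helper
+# (including its use of the positive quadratic root). The three quadratics
+# cover the three corner-displacement cases, and the minimum radius is
+# intended to keep the shifted box at IoU >= min_overlap. E.g.
+# (illustrative) gaussian_radius((10, 10), 0.7) ~= 2.73, so the drawing
+# helpers end up with an int radius of 2.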
+
+
+def draw_gaussian(heatmap, center, radius, k=1, delta=6):
+    diameter = 2 * radius + 1
+    sigma = diameter / delta
+ gaussian = gaussian2D((diameter, diameter), sigma_x=sigma, sigma_y=sigma)
+
+ x, y = center
+
+ height, width = heatmap.shape[0:2]
+
+ left, right = min(x, radius), min(width - x, radius + 1)
+ top, bottom = min(y, radius), min(height - y, radius + 1)
+
+ masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
+ masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:
+ radius + right]
+ np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
+
+
+def gaussian2D(shape, sigma_x=1, sigma_y=1):
+ m, n = [(ss - 1.) / 2. for ss in shape]
+ y, x = np.ogrid[-m:m + 1, -n:n + 1]
+
+ h = np.exp(-(x * x / (2 * sigma_x * sigma_x) + y * y / (2 * sigma_y *
+ sigma_y)))
+ h[h < np.finfo(h.dtype).eps * h.max()] = 0
+ return h
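+
+# e.g. (illustrative) gaussian2D((3, 3)) peaks at 1.0 in the center, with
+# ~0.61 on the edge-adjacent cells and ~0.37 in the corners; values below
+# machine eps relative to the max are zeroed out.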
+
+
+def draw_umich_gaussian(heatmap, center, radius, k=1):
+ """
+ draw_umich_gaussian, refer to https://github.com/xingyizhou/CenterNet/blob/master/src/lib/utils/image.py#L126
+ """
+ diameter = 2 * radius + 1
+ gaussian = gaussian2D(
+ (diameter, diameter), sigma_x=diameter / 6, sigma_y=diameter / 6)
+
+ x, y = int(center[0]), int(center[1])
+
+ height, width = heatmap.shape[0:2]
+
+ left, right = min(x, radius), min(width - x, radius + 1)
+ top, bottom = min(y, radius), min(height - y, radius + 1)
+
+ masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
+ masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:
+ radius + right]
+ if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
+ np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
+ return heatmap
+
+
+def get_border(border, size):
+ i = 1
+ while size - border // i <= border // i:
+ i *= 2
+ return border // i
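+
+# e.g. (illustrative) get_border(128, 500) == 128, while for a small image
+# get_border(128, 100) == 32: i doubles until the border fits, i.e. until
+# size - border // i > border // i.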
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/operators.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/operators.py
new file mode 100644
index 000000000..5cc14a44d
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/data/transform/operators.py
@@ -0,0 +1,3015 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# function:
+# operators to process sample,
+# eg: decode/resize/crop image
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+try:
+ from collections.abc import Sequence
+except Exception:
+ from collections import Sequence
+
+from numbers import Number, Integral
+
+import uuid
+import random
+import math
+import numpy as np
+import os
+import copy
+import logging
+import cv2
+from PIL import Image, ImageDraw
+import pickle
+import threading
+MUTEX = threading.Lock()
+
+from ppdet.core.workspace import serializable
+from ppdet.modeling import bbox_utils
+from ..reader import Compose
+
+from .op_helper import (satisfy_sample_constraint, filter_and_process,
+ generate_sample_bbox, clip_bbox, data_anchor_sampling,
+ satisfy_sample_constraint_coverage, crop_image_sampling,
+ generate_sample_bbox_square, bbox_area_sampling,
+ is_poly, get_border)
+
+from ppdet.utils.logger import setup_logger
+from ppdet.modeling.keypoint_utils import get_affine_transform, affine_transform
+logger = setup_logger(__name__)
+
+registered_ops = []
+
+
+def register_op(cls):
+ registered_ops.append(cls.__name__)
+ if not hasattr(BaseOperator, cls.__name__):
+ setattr(BaseOperator, cls.__name__, cls)
+ else:
+ raise KeyError("The {} class has been registered.".format(cls.__name__))
+ return serializable(cls)
+
+
+class BboxError(ValueError):
+ pass
+
+
+class ImageError(ValueError):
+ pass
+
+
+class BaseOperator(object):
+ def __init__(self, name=None):
+ if name is None:
+ name = self.__class__.__name__
+ self._id = name + '_' + str(uuid.uuid4())[-6:]
+
+ def apply(self, sample, context=None):
+ """ Process a sample.
+ Args:
+ sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}
+ context (dict): info about this sample processing
+ Returns:
+ result (dict): a processed sample
+ """
+ return sample
+
+ def __call__(self, sample, context=None):
+ """ Process a sample.
+ Args:
+ sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}
+ context (dict): info about this sample processing
+ Returns:
+ result (dict): a processed sample
+ """
+ if isinstance(sample, Sequence):
+ for i in range(len(sample)):
+ sample[i] = self.apply(sample[i], context)
+ else:
+ sample = self.apply(sample, context)
+ return sample
+
+ def __str__(self):
+ return str(self._id)
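+
+# Usage sketch (illustrative only): each registered op is a callable over a
+# sample dict (or a list of sample dicts), and ops are typically chained:
+#   ops = [Decode(), RandomFlip(prob=0.5),
+#          Resize([608, 608], keep_ratio=False), Permute()]
+#   for op in ops:
+#       sample = op(sample)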
+
+
+@register_op
+class Decode(BaseOperator):
+ def __init__(self):
+ """ Transform the image data to numpy format following the rgb format
+ """
+ super(Decode, self).__init__()
+
+ def apply(self, sample, context=None):
+ """ load image if 'im_file' field is not empty but 'image' is"""
+ if 'image' not in sample:
+ with open(sample['im_file'], 'rb') as f:
+ sample['image'] = f.read()
+ sample.pop('im_file')
+
+ im = sample['image']
+ data = np.frombuffer(im, dtype='uint8')
+ im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
+ if 'keep_ori_im' in sample and sample['keep_ori_im']:
+ sample['ori_image'] = im
+ im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+
+ sample['image'] = im
+ if 'h' not in sample:
+ sample['h'] = im.shape[0]
+ elif sample['h'] != im.shape[0]:
+ logger.warning(
+ "The actual image height: {} is not equal to the "
+ "height: {} in annotation, and update sample['h'] by actual "
+ "image height.".format(im.shape[0], sample['h']))
+ sample['h'] = im.shape[0]
+ if 'w' not in sample:
+ sample['w'] = im.shape[1]
+ elif sample['w'] != im.shape[1]:
+ logger.warning(
+ "The actual image width: {} is not equal to the "
+ "width: {} in annotation, and update sample['w'] by actual "
+ "image width.".format(im.shape[1], sample['w']))
+ sample['w'] = im.shape[1]
+
+ sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
+ sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
+ return sample
+
+
+def _make_dirs(dirname):
+ try:
+ from pathlib import Path
+ except ImportError:
+ from pathlib2 import Path
+ Path(dirname).mkdir(exist_ok=True)
+
+
+@register_op
+class DecodeCache(BaseOperator):
+ def __init__(self, cache_root=None):
+        '''decode the image and cache the decoded result
+        '''
+ super(DecodeCache, self).__init__()
+
+ self.use_cache = False if cache_root is None else True
+ self.cache_root = cache_root
+
+ if cache_root is not None:
+ _make_dirs(cache_root)
+
+ def apply(self, sample, context=None):
+
+ if self.use_cache and os.path.exists(
+ self.cache_path(self.cache_root, sample['im_file'])):
+ path = self.cache_path(self.cache_root, sample['im_file'])
+ im = self.load(path)
+
+ else:
+ if 'image' not in sample:
+ with open(sample['im_file'], 'rb') as f:
+ sample['image'] = f.read()
+
+ im = sample['image']
+ data = np.frombuffer(im, dtype='uint8')
+ im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
+ if 'keep_ori_im' in sample and sample['keep_ori_im']:
+ sample['ori_image'] = im
+ im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+
+ if self.use_cache and not os.path.exists(
+ self.cache_path(self.cache_root, sample['im_file'])):
+ path = self.cache_path(self.cache_root, sample['im_file'])
+ self.dump(im, path)
+
+ sample['image'] = im
+ sample['h'] = im.shape[0]
+ sample['w'] = im.shape[1]
+
+ sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
+ sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
+
+ sample.pop('im_file')
+
+ return sample
+
+ @staticmethod
+    def cache_path(dir_root, im_file):
+        return os.path.join(dir_root, os.path.basename(im_file) + '.pkl')
+
+ @staticmethod
+ def load(path):
+ with open(path, 'rb') as f:
+ im = pickle.load(f)
+ return im
+
+ @staticmethod
+ def dump(obj, path):
+ MUTEX.acquire()
+ try:
+ with open(path, 'wb') as f:
+ pickle.dump(obj, f)
+
+ except Exception as e:
+            logger.warning('dumping {} raised exception {}'.format(path,
+                                                                   str(e)))
+
+ finally:
+ MUTEX.release()
+
+
+@register_op
+class SniperDecodeCrop(BaseOperator):
+ def __init__(self):
+ super(SniperDecodeCrop, self).__init__()
+
+ def __call__(self, sample, context=None):
+ if 'image' not in sample:
+ with open(sample['im_file'], 'rb') as f:
+ sample['image'] = f.read()
+ sample.pop('im_file')
+
+ im = sample['image']
+ data = np.frombuffer(im, dtype='uint8')
+ im = cv2.imdecode(data, cv2.IMREAD_COLOR) # BGR mode, but need RGB mode
+ if 'keep_ori_im' in sample and sample['keep_ori_im']:
+ sample['ori_image'] = im
+ im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+
+ chip = sample['chip']
+ x1, y1, x2, y2 = [int(xi) for xi in chip]
+ im = im[max(y1, 0):min(y2, im.shape[0]), max(x1, 0):min(x2, im.shape[
+ 1]), :]
+
+ sample['image'] = im
+ h = im.shape[0]
+ w = im.shape[1]
+ # sample['im_info'] = [h, w, 1.0]
+ sample['h'] = h
+ sample['w'] = w
+
+ sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
+ sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
+ return sample
+
+
+@register_op
+class Permute(BaseOperator):
+ def __init__(self):
+ """
+        Transpose the image layout from (H, W, C) to (C, H, W)
+ """
+ super(Permute, self).__init__()
+
+ def apply(self, sample, context=None):
+ im = sample['image']
+ im = im.transpose((2, 0, 1))
+ sample['image'] = im
+ return sample
+
+
+@register_op
+class Lighting(BaseOperator):
+ """
+    Perturb image lighting using PCA eigenvalues and eigenvectors
+ Args:
+ eigval (list): eigenvalues
+ eigvec (list): eigenvectors
+ alphastd (float): random weight of lighting, 0.1 by default
+ """
+
+ def __init__(self, eigval, eigvec, alphastd=0.1):
+ super(Lighting, self).__init__()
+ self.alphastd = alphastd
+ self.eigval = np.array(eigval).astype('float32')
+ self.eigvec = np.array(eigvec).astype('float32')
+
+ def apply(self, sample, context=None):
+ alpha = np.random.normal(scale=self.alphastd, size=(3, ))
+ sample['image'] += np.dot(self.eigvec, self.eigval * alpha)
+ return sample
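+
+# Note: this is AlexNet-style PCA color augmentation: a random weight
+# alpha ~ N(0, alphastd) is drawn per principal component and every pixel
+# is shifted by eigvec @ (eigval * alpha). The commonly used ImageNet
+# statistics (an assumption, not shipped in this file) are
+# eigval = [0.2175, 0.0188, 0.0045] with the matching 3x3 eigvec matrix.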
+
+
+@register_op
+class RandomErasingImage(BaseOperator):
+ def __init__(self, prob=0.5, lower=0.02, higher=0.4, aspect_ratio=0.3):
+ """
+ Random Erasing Data Augmentation, see https://arxiv.org/abs/1708.04896
+ Args:
+ prob (float): probability to carry out random erasing
+ lower (float): lower limit of the erasing area ratio
+ higher (float): upper limit of the erasing area ratio
+ aspect_ratio (float): aspect ratio of the erasing region
+ """
+ super(RandomErasingImage, self).__init__()
+ self.prob = prob
+ self.lower = lower
+ self.higher = higher
+ self.aspect_ratio = aspect_ratio
+
+ def apply(self, sample):
+ gt_bbox = sample['gt_bbox']
+ im = sample['image']
+ if not isinstance(im, np.ndarray):
+ raise TypeError("{}: image is not a numpy array.".format(self))
+ if len(im.shape) != 3:
+ raise ImageError("{}: image is not 3-dimensional.".format(self))
+
+ for idx in range(gt_bbox.shape[0]):
+ if self.prob <= np.random.rand():
+ continue
+
+ x1, y1, x2, y2 = gt_bbox[idx, :]
+ w_bbox = x2 - x1
+ h_bbox = y2 - y1
+ area = w_bbox * h_bbox
+
+ target_area = random.uniform(self.lower, self.higher) * area
+ aspect_ratio = random.uniform(self.aspect_ratio,
+ 1 / self.aspect_ratio)
+
+ h = int(round(math.sqrt(target_area * aspect_ratio)))
+ w = int(round(math.sqrt(target_area / aspect_ratio)))
+
+ if w < w_bbox and h < h_bbox:
+ off_y1 = random.randint(0, int(h_bbox - h))
+ off_x1 = random.randint(0, int(w_bbox - w))
+ im[int(y1 + off_y1):int(y1 + off_y1 + h), int(x1 + off_x1):int(
+ x1 + off_x1 + w), :] = 0
+ sample['image'] = im
+ return sample
+
+
+@register_op
+class NormalizeImage(BaseOperator):
+ def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1],
+ is_scale=True):
+ """
+ Args:
+ mean (list): the pixel mean
+            std (list): the pixel standard deviation
+ """
+ super(NormalizeImage, self).__init__()
+ self.mean = mean
+ self.std = std
+ self.is_scale = is_scale
+ if not (isinstance(self.mean, list) and isinstance(self.std, list) and
+ isinstance(self.is_scale, bool)):
+ raise TypeError("{}: input type is invalid.".format(self))
+ from functools import reduce
+ if reduce(lambda x, y: x * y, self.std) == 0:
+ raise ValueError('{}: std is invalid!'.format(self))
+
+ def apply(self, sample, context=None):
+ """Normalize the image.
+ Operators:
+ 1.(optional) Scale the image to [0,1]
+ 2. Each pixel minus mean and is divided by std
+ """
+ im = sample['image']
+ im = im.astype(np.float32, copy=False)
+ mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
+ std = np.array(self.std)[np.newaxis, np.newaxis, :]
+
+ if self.is_scale:
+ im = im / 255.0
+
+ im -= mean
+ im /= std
+
+ sample['image'] = im
+ return sample
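+
+# e.g. (illustrative) with the defaults mean=[0.485, 0.456, 0.406],
+# std=[1, 1, 1] and is_scale=True, a white pixel (255, 255, 255) maps to
+# (0.515, 0.544, 0.594).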
+
+
+@register_op
+class GridMask(BaseOperator):
+ def __init__(self,
+ use_h=True,
+ use_w=True,
+ rotate=1,
+ offset=False,
+ ratio=0.5,
+ mode=1,
+ prob=0.7,
+ upper_iter=360000):
+ """
+ GridMask Data Augmentation, see https://arxiv.org/abs/2001.04086
+ Args:
+ use_h (bool): whether to mask vertically
+            use_w (bool): whether to mask horizontally
+ rotate (float): angle for the mask to rotate
+ offset (float): mask offset
+ ratio (float): mask ratio
+ mode (int): gridmask mode
+ prob (float): max probability to carry out gridmask
+ upper_iter (int): suggested to be equal to global max_iter
+ """
+ super(GridMask, self).__init__()
+ self.use_h = use_h
+ self.use_w = use_w
+ self.rotate = rotate
+ self.offset = offset
+ self.ratio = ratio
+ self.mode = mode
+ self.prob = prob
+ self.upper_iter = upper_iter
+
+ from .gridmask_utils import Gridmask
+ self.gridmask_op = Gridmask(
+ use_h,
+ use_w,
+ rotate=rotate,
+ offset=offset,
+ ratio=ratio,
+ mode=mode,
+ prob=prob,
+ upper_iter=upper_iter)
+
+ def apply(self, sample, context=None):
+ sample['image'] = self.gridmask_op(sample['image'], sample['curr_iter'])
+ return sample
+
+
+@register_op
+class RandomDistort(BaseOperator):
+ """Random color distortion.
+ Args:
+ hue (list): hue settings. in [lower, upper, probability] format.
+ saturation (list): saturation settings. in [lower, upper, probability] format.
+ contrast (list): contrast settings. in [lower, upper, probability] format.
+ brightness (list): brightness settings. in [lower, upper, probability] format.
+ random_apply (bool): whether to apply in random (yolo) or fixed (SSD)
+ order.
+        count (int): the number of distortions to apply
+ random_channel (bool): whether to swap channels randomly
+ """
+
+ def __init__(self,
+ hue=[-18, 18, 0.5],
+ saturation=[0.5, 1.5, 0.5],
+ contrast=[0.5, 1.5, 0.5],
+ brightness=[0.5, 1.5, 0.5],
+ random_apply=True,
+ count=4,
+ random_channel=False):
+ super(RandomDistort, self).__init__()
+ self.hue = hue
+ self.saturation = saturation
+ self.contrast = contrast
+ self.brightness = brightness
+ self.random_apply = random_apply
+ self.count = count
+ self.random_channel = random_channel
+
+ def apply_hue(self, img):
+ low, high, prob = self.hue
+ if np.random.uniform(0., 1.) < prob:
+ return img
+
+ img = img.astype(np.float32)
+        # it works, but the result differs from the HSV version
+ delta = np.random.uniform(low, high)
+ u = np.cos(delta * np.pi)
+ w = np.sin(delta * np.pi)
+ bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
+ tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],
+ [0.211, -0.523, 0.311]])
+ ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],
+ [1.0, -1.107, 1.705]])
+ t = np.dot(np.dot(ityiq, bt), tyiq).T
+ img = np.dot(img, t)
+ return img
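+
+    # Mechanism note: apply_hue rotates the chroma (IQ) plane of the YIQ
+    # color space by delta * pi, folding RGB->YIQ (tyiq), the rotation (bt)
+    # and YIQ->RGB (ityiq) into the single matrix t. Also note the prob
+    # convention shared by all four apply_* helpers below: the distortion
+    # is *skipped* when the uniform draw falls below prob.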
+
+ def apply_saturation(self, img):
+ low, high, prob = self.saturation
+ if np.random.uniform(0., 1.) < prob:
+ return img
+ delta = np.random.uniform(low, high)
+ img = img.astype(np.float32)
+        # it works, but the result differs from the HSV version
+ gray = img * np.array([[[0.299, 0.587, 0.114]]], dtype=np.float32)
+ gray = gray.sum(axis=2, keepdims=True)
+ gray *= (1.0 - delta)
+ img *= delta
+ img += gray
+ return img
+
+ def apply_contrast(self, img):
+ low, high, prob = self.contrast
+ if np.random.uniform(0., 1.) < prob:
+ return img
+ delta = np.random.uniform(low, high)
+ img = img.astype(np.float32)
+ img *= delta
+ return img
+
+ def apply_brightness(self, img):
+ low, high, prob = self.brightness
+ if np.random.uniform(0., 1.) < prob:
+ return img
+ delta = np.random.uniform(low, high)
+ img = img.astype(np.float32)
+ img += delta
+ return img
+
+ def apply(self, sample, context=None):
+ img = sample['image']
+ if self.random_apply:
+ functions = [
+ self.apply_brightness, self.apply_contrast,
+ self.apply_saturation, self.apply_hue
+ ]
+ distortions = np.random.permutation(functions)[:self.count]
+ for func in distortions:
+ img = func(img)
+ sample['image'] = img
+ return sample
+
+ img = self.apply_brightness(img)
+ mode = np.random.randint(0, 2)
+
+ if mode:
+ img = self.apply_contrast(img)
+
+ img = self.apply_saturation(img)
+ img = self.apply_hue(img)
+
+ if not mode:
+ img = self.apply_contrast(img)
+
+ if self.random_channel:
+ if np.random.randint(0, 2):
+ img = img[..., np.random.permutation(3)]
+ sample['image'] = img
+ return sample
+
+
+@register_op
+class AutoAugment(BaseOperator):
+ def __init__(self, autoaug_type="v1"):
+ """
+ Args:
+ autoaug_type (str): autoaug type, support v0, v1, v2, v3, test
+ """
+ super(AutoAugment, self).__init__()
+ self.autoaug_type = autoaug_type
+
+ def apply(self, sample, context=None):
+ """
+ Learning Data Augmentation Strategies for Object Detection, see https://arxiv.org/abs/1906.11172
+ """
+ im = sample['image']
+ gt_bbox = sample['gt_bbox']
+ if not isinstance(im, np.ndarray):
+ raise TypeError("{}: image is not a numpy array.".format(self))
+ if len(im.shape) != 3:
+ raise ImageError("{}: image is not 3-dimensional.".format(self))
+ if len(gt_bbox) == 0:
+ return sample
+
+ height, width, _ = im.shape
+ norm_gt_bbox = np.ones_like(gt_bbox, dtype=np.float32)
+ norm_gt_bbox[:, 0] = gt_bbox[:, 1] / float(height)
+ norm_gt_bbox[:, 1] = gt_bbox[:, 0] / float(width)
+ norm_gt_bbox[:, 2] = gt_bbox[:, 3] / float(height)
+ norm_gt_bbox[:, 3] = gt_bbox[:, 2] / float(width)
+
+ from .autoaugment_utils import distort_image_with_autoaugment
+ im, norm_gt_bbox = distort_image_with_autoaugment(im, norm_gt_bbox,
+ self.autoaug_type)
+
+ gt_bbox[:, 0] = norm_gt_bbox[:, 1] * float(width)
+ gt_bbox[:, 1] = norm_gt_bbox[:, 0] * float(height)
+ gt_bbox[:, 2] = norm_gt_bbox[:, 3] * float(width)
+ gt_bbox[:, 3] = norm_gt_bbox[:, 2] * float(height)
+
+ sample['image'] = im
+ sample['gt_bbox'] = gt_bbox
+ return sample
+
+
+@register_op
+class RandomFlip(BaseOperator):
+ def __init__(self, prob=0.5):
+ """
+ Args:
+ prob (float): the probability of flipping image
+ """
+ super(RandomFlip, self).__init__()
+ self.prob = prob
+ if not (isinstance(self.prob, float)):
+ raise TypeError("{}: input type is invalid.".format(self))
+
+ def apply_segm(self, segms, height, width):
+ def _flip_poly(poly, width):
+ flipped_poly = np.array(poly)
+ flipped_poly[0::2] = width - np.array(poly[0::2])
+ return flipped_poly.tolist()
+
+ def _flip_rle(rle, height, width):
+ if 'counts' in rle and type(rle['counts']) == list:
+ rle = mask_util.frPyObjects(rle, height, width)
+ mask = mask_util.decode(rle)
+ mask = mask[:, ::-1]
+ rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
+ return rle
+
+ flipped_segms = []
+ for segm in segms:
+ if is_poly(segm):
+ # Polygon format
+ flipped_segms.append([_flip_poly(poly, width) for poly in segm])
+ else:
+ # RLE format
+ import pycocotools.mask as mask_util
+ flipped_segms.append(_flip_rle(segm, height, width))
+ return flipped_segms
+
+ def apply_keypoint(self, gt_keypoint, width):
+ for i in range(gt_keypoint.shape[1]):
+ if i % 2 == 0:
+ old_x = gt_keypoint[:, i].copy()
+ gt_keypoint[:, i] = width - old_x
+ return gt_keypoint
+
+ def apply_image(self, image):
+ return image[:, ::-1, :]
+
+ def apply_bbox(self, bbox, width):
+ oldx1 = bbox[:, 0].copy()
+ oldx2 = bbox[:, 2].copy()
+ bbox[:, 0] = width - oldx2
+ bbox[:, 2] = width - oldx1
+ return bbox
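+
+    # e.g. (illustrative) for image width 100, a box [10, 20, 30, 40]
+    # becomes [70, 20, 90, 40]: x1' = width - x2, x2' = width - x1.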
+
+ def apply_rbox(self, bbox, width):
+ oldx1 = bbox[:, 0].copy()
+ oldx2 = bbox[:, 2].copy()
+ oldx3 = bbox[:, 4].copy()
+ oldx4 = bbox[:, 6].copy()
+ bbox[:, 0] = width - oldx1
+ bbox[:, 2] = width - oldx2
+ bbox[:, 4] = width - oldx3
+ bbox[:, 6] = width - oldx4
+ bbox = [bbox_utils.get_best_begin_point_single(e) for e in bbox]
+ return bbox
+
+ def apply(self, sample, context=None):
+ """Filp the image and bounding box.
+ Operators:
+ 1. Flip the image numpy.
+ 2. Transform the bboxes' x coordinates.
+ (Must judge whether the coordinates are normalized!)
+ 3. Transform the segmentations' x coordinates.
+ (Must judge whether the coordinates are normalized!)
+ Output:
+ sample: the image, bounding box and segmentation part
+ in sample are flipped.
+ """
+ if np.random.uniform(0, 1) < self.prob:
+ im = sample['image']
+ height, width = im.shape[:2]
+ im = self.apply_image(im)
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
+ sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], width)
+ if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
+ sample['gt_poly'] = self.apply_segm(sample['gt_poly'], height,
+ width)
+ if 'gt_keypoint' in sample and len(sample['gt_keypoint']) > 0:
+ sample['gt_keypoint'] = self.apply_keypoint(
+ sample['gt_keypoint'], width)
+
+ if 'semantic' in sample and sample['semantic']:
+ sample['semantic'] = sample['semantic'][:, ::-1]
+
+ if 'gt_segm' in sample and sample['gt_segm'].any():
+ sample['gt_segm'] = sample['gt_segm'][:, :, ::-1]
+
+ if 'gt_rbox2poly' in sample and sample['gt_rbox2poly'].any():
+ sample['gt_rbox2poly'] = self.apply_rbox(sample['gt_rbox2poly'],
+ width)
+
+ sample['flipped'] = True
+ sample['image'] = im
+ return sample
+
+
+@register_op
+class Resize(BaseOperator):
+ def __init__(self, target_size, keep_ratio, interp=cv2.INTER_LINEAR):
+ """
+        Resize image to target size. If keep_ratio is True,
+        resize the image's long side to the maximum of target_size;
+        if keep_ratio is False, resize the image to target_size (h, w).
+ Args:
+ target_size (int|list): image target size
+            keep_ratio (bool): whether to keep the aspect ratio, default True
+ interp (int): the interpolation method
+ """
+ super(Resize, self).__init__()
+ self.keep_ratio = keep_ratio
+ self.interp = interp
+ if not isinstance(target_size, (Integral, Sequence)):
+ raise TypeError(
+ "Type of target_size is invalid. Must be Integer or List or Tuple, now is {}".
+ format(type(target_size)))
+ if isinstance(target_size, Integral):
+ target_size = [target_size, target_size]
+ self.target_size = target_size
+
+ def apply_image(self, image, scale):
+ im_scale_x, im_scale_y = scale
+
+ return cv2.resize(
+ image,
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=self.interp)
+
+ def apply_bbox(self, bbox, scale, size):
+ im_scale_x, im_scale_y = scale
+ resize_w, resize_h = size
+ bbox[:, 0::2] *= im_scale_x
+ bbox[:, 1::2] *= im_scale_y
+ bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, resize_w)
+ bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, resize_h)
+ return bbox
+
+ def apply_segm(self, segms, im_size, scale):
+ def _resize_poly(poly, im_scale_x, im_scale_y):
+ resized_poly = np.array(poly).astype('float32')
+ resized_poly[0::2] *= im_scale_x
+ resized_poly[1::2] *= im_scale_y
+ return resized_poly.tolist()
+
+ def _resize_rle(rle, im_h, im_w, im_scale_x, im_scale_y):
+ if 'counts' in rle and type(rle['counts']) == list:
+ rle = mask_util.frPyObjects(rle, im_h, im_w)
+
+ mask = mask_util.decode(rle)
+ mask = cv2.resize(
+ mask,
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=self.interp)
+ rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
+ return rle
+
+ im_h, im_w = im_size
+ im_scale_x, im_scale_y = scale
+ resized_segms = []
+ for segm in segms:
+ if is_poly(segm):
+ # Polygon format
+ resized_segms.append([
+ _resize_poly(poly, im_scale_x, im_scale_y) for poly in segm
+ ])
+ else:
+ # RLE format
+ import pycocotools.mask as mask_util
+ resized_segms.append(
+ _resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))
+
+ return resized_segms
+
+ def apply(self, sample, context=None):
+ """ Resize the image numpy.
+ """
+ im = sample['image']
+ if not isinstance(im, np.ndarray):
+ raise TypeError("{}: image type is not numpy.".format(self))
+ if len(im.shape) != 3:
+ raise ImageError('{}: image is not 3-dimensional.'.format(self))
+
+ # apply image
+ im_shape = im.shape
+ if self.keep_ratio:
+
+ im_size_min = np.min(im_shape[0:2])
+ im_size_max = np.max(im_shape[0:2])
+
+ target_size_min = np.min(self.target_size)
+ target_size_max = np.max(self.target_size)
+
+ im_scale = min(target_size_min / im_size_min,
+ target_size_max / im_size_max)
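+            # e.g. (illustrative) a 600x800 (h x w) image with target_size
+            # [800, 1333]: im_scale = min(800/600, 1333/800) = 4/3, so the
+            # short side becomes 800 and the long side ~1067 (<= 1333).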
+
+ resize_h = im_scale * float(im_shape[0])
+ resize_w = im_scale * float(im_shape[1])
+
+ im_scale_x = im_scale
+ im_scale_y = im_scale
+ else:
+ resize_h, resize_w = self.target_size
+ im_scale_y = resize_h / im_shape[0]
+ im_scale_x = resize_w / im_shape[1]
+
+ im = self.apply_image(sample['image'], [im_scale_x, im_scale_y])
+ sample['image'] = im
+ sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
+ if 'scale_factor' in sample:
+ scale_factor = sample['scale_factor']
+ sample['scale_factor'] = np.asarray(
+ [scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
+ dtype=np.float32)
+ else:
+ sample['scale_factor'] = np.asarray(
+ [im_scale_y, im_scale_x], dtype=np.float32)
+
+ # apply bbox
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
+ sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'],
+ [im_scale_x, im_scale_y],
+ [resize_w, resize_h])
+
+ # apply rbox
+ if 'gt_rbox2poly' in sample:
+ if np.array(sample['gt_rbox2poly']).shape[1] != 8:
+ logger.warning(
+ "gt_rbox2poly's length shoule be 8, but actually is {}".
+ format(len(sample['gt_rbox2poly'])))
+ sample['gt_rbox2poly'] = self.apply_bbox(sample['gt_rbox2poly'],
+ [im_scale_x, im_scale_y],
+ [resize_w, resize_h])
+
+ # apply polygon
+ if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
+ sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_shape[:2],
+ [im_scale_x, im_scale_y])
+
+ # apply semantic
+ if 'semantic' in sample and sample['semantic']:
+ semantic = sample['semantic']
+ semantic = cv2.resize(
+ semantic.astype('float32'),
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=self.interp)
+ semantic = np.asarray(semantic).astype('int32')
+ semantic = np.expand_dims(semantic, 0)
+ sample['semantic'] = semantic
+
+ # apply gt_segm
+ if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
+ masks = [
+ cv2.resize(
+ gt_segm,
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=cv2.INTER_NEAREST)
+ for gt_segm in sample['gt_segm']
+ ]
+ sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
+
+ return sample
+
+
+@register_op
+class MultiscaleTestResize(BaseOperator):
+ def __init__(self,
+ origin_target_size=[800, 1333],
+ target_size=[],
+ interp=cv2.INTER_LINEAR,
+ use_flip=True):
+ """
+        Rescale the image to each size in target_size, capped at max_size.
+ Args:
+ origin_target_size (list): origin target size of image
+ target_size (list): A list of target sizes of image.
+ interp (int): the interpolation method.
+ use_flip (bool): whether use flip augmentation.
+ """
+ super(MultiscaleTestResize, self).__init__()
+ self.interp = interp
+ self.use_flip = use_flip
+
+ if not isinstance(target_size, Sequence):
+ raise TypeError(
+ "Type of target_size is invalid. Must be List or Tuple, now is {}".
+ format(type(target_size)))
+ self.target_size = target_size
+
+ if not isinstance(origin_target_size, Sequence):
+ raise TypeError(
+ "Type of origin_target_size is invalid. Must be List or Tuple, now is {}".
+ format(type(origin_target_size)))
+
+ self.origin_target_size = origin_target_size
+
+ def apply(self, sample, context=None):
+ """ Resize the image numpy for multi-scale test.
+ """
+ samples = []
+ resizer = Resize(
+ self.origin_target_size, keep_ratio=True, interp=self.interp)
+ samples.append(resizer(sample.copy(), context))
+ if self.use_flip:
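+            # prob=1.1 (> 1) means the uniform draw in RandomFlip.apply is
+            # always below it, so this copy is always flipped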
+ flipper = RandomFlip(1.1)
+ samples.append(flipper(sample.copy(), context=context))
+
+ for size in self.target_size:
+ resizer = Resize(size, keep_ratio=True, interp=self.interp)
+ samples.append(resizer(sample.copy(), context))
+
+ return samples
+
+
+@register_op
+class RandomResize(BaseOperator):
+ def __init__(self,
+ target_size,
+ keep_ratio=True,
+ interp=cv2.INTER_LINEAR,
+ random_size=True,
+ random_interp=False):
+ """
+        Resize the image to a randomly chosen target size and interpolation method.
+ Args:
+            target_size (int, list, tuple): image target size; if random_size is True, must be a list or tuple
+            keep_ratio (bool): whether to keep the aspect ratio, default True
+            interp (int): the interpolation method
+            random_size (bool): whether to randomly select the target size of image
+            random_interp (bool): whether to randomly select the interpolation method
+ """
+ super(RandomResize, self).__init__()
+ self.keep_ratio = keep_ratio
+ self.interp = interp
+ self.interps = [
+ cv2.INTER_NEAREST,
+ cv2.INTER_LINEAR,
+ cv2.INTER_AREA,
+ cv2.INTER_CUBIC,
+ cv2.INTER_LANCZOS4,
+ ]
+ assert isinstance(target_size, (
+ Integral, Sequence)), "target_size must be Integer, List or Tuple"
+ if random_size and not isinstance(target_size, Sequence):
+ raise TypeError(
+ "Type of target_size is invalid when random_size is True. Must be List or Tuple, now is {}".
+ format(type(target_size)))
+ self.target_size = target_size
+ self.random_size = random_size
+ self.random_interp = random_interp
+
+ def apply(self, sample, context=None):
+ """ Resize the image numpy.
+ """
+ if self.random_size:
+ target_size = random.choice(self.target_size)
+ else:
+ target_size = self.target_size
+
+ if self.random_interp:
+ interp = random.choice(self.interps)
+ else:
+ interp = self.interp
+
+ resizer = Resize(target_size, self.keep_ratio, interp)
+ return resizer(sample, context=context)
+
+
+@register_op
+class RandomExpand(BaseOperator):
+ """Random expand the canvas.
+ Args:
+ ratio (float): maximum expansion ratio.
+ prob (float): probability to expand.
+ fill_value (list): color value used to fill the canvas. in RGB order.
+ """
+
+ def __init__(self, ratio=4., prob=0.5, fill_value=(127.5, 127.5, 127.5)):
+ super(RandomExpand, self).__init__()
+ assert ratio > 1.01, "expand ratio must be larger than 1.01"
+ self.ratio = ratio
+ self.prob = prob
+ assert isinstance(fill_value, (Number, Sequence)), \
+ "fill value must be either float or sequence"
+ if isinstance(fill_value, Number):
+ fill_value = (fill_value, ) * 3
+ if not isinstance(fill_value, tuple):
+ fill_value = tuple(fill_value)
+ self.fill_value = fill_value
+
+ def apply(self, sample, context=None):
+ if np.random.uniform(0., 1.) < self.prob:
+ return sample
+
+ im = sample['image']
+ height, width = im.shape[:2]
+ ratio = np.random.uniform(1., self.ratio)
+ h = int(height * ratio)
+ w = int(width * ratio)
+ if not h > height or not w > width:
+ return sample
+ y = np.random.randint(0, h - height)
+ x = np.random.randint(0, w - width)
+ offsets, size = [x, y], [h, w]
+
+ pad = Pad(size,
+ pad_mode=-1,
+ offsets=offsets,
+ fill_value=self.fill_value)
+
+ return pad(sample, context=context)
+
+
+@register_op
+class CropWithSampling(BaseOperator):
+ def __init__(self, batch_sampler, satisfy_all=False, avoid_no_bbox=True):
+ """
+ Args:
+ batch_sampler (list): Multiple sets of different
+ parameters for cropping.
+ satisfy_all (bool): whether all boxes must satisfy.
+ e.g.[[1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0],
+ [1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0],
+ [1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 1.0],
+ [1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0],
+ [1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 1.0],
+ [1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 1.0],
+ [1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0]]
+ [max sample, max trial, min scale, max scale,
+ min aspect ratio, max aspect ratio,
+ min overlap, max overlap]
+            avoid_no_bbox (bool): whether to avoid the
+                situation where the box does not appear.
+ """
+ super(CropWithSampling, self).__init__()
+ self.batch_sampler = batch_sampler
+ self.satisfy_all = satisfy_all
+ self.avoid_no_bbox = avoid_no_bbox
+
+ def apply(self, sample, context):
+ """
+ Crop the image and modify bounding box.
+ Operators:
+ 1. Scale the image width and height.
+            2. Crop the image according to a random sample.
+ 3. Rescale the bounding box.
+ 4. Determine if the new bbox is satisfied in the new image.
+ Returns:
+ sample: the image, bounding box are replaced.
+ """
+ assert 'image' in sample, "image data not found"
+ im = sample['image']
+ gt_bbox = sample['gt_bbox']
+ gt_class = sample['gt_class']
+ im_height, im_width = im.shape[:2]
+ gt_score = None
+ if 'gt_score' in sample:
+ gt_score = sample['gt_score']
+ sampled_bbox = []
+ gt_bbox = gt_bbox.tolist()
+ for sampler in self.batch_sampler:
+ found = 0
+ for i in range(sampler[1]):
+ if found >= sampler[0]:
+ break
+ sample_bbox = generate_sample_bbox(sampler)
+ if satisfy_sample_constraint(sampler, sample_bbox, gt_bbox,
+ self.satisfy_all):
+ sampled_bbox.append(sample_bbox)
+ found = found + 1
+ im = np.array(im)
+ while sampled_bbox:
+ idx = int(np.random.uniform(0, len(sampled_bbox)))
+ sample_bbox = sampled_bbox.pop(idx)
+ sample_bbox = clip_bbox(sample_bbox)
+ crop_bbox, crop_class, crop_score = \
+ filter_and_process(sample_bbox, gt_bbox, gt_class, scores=gt_score)
+ if self.avoid_no_bbox:
+ if len(crop_bbox) < 1:
+ continue
+ xmin = int(sample_bbox[0] * im_width)
+ xmax = int(sample_bbox[2] * im_width)
+ ymin = int(sample_bbox[1] * im_height)
+ ymax = int(sample_bbox[3] * im_height)
+ im = im[ymin:ymax, xmin:xmax]
+ sample['image'] = im
+ sample['gt_bbox'] = crop_bbox
+ sample['gt_class'] = crop_class
+ sample['gt_score'] = crop_score
+ return sample
+ return sample
+
+
+@register_op
+class CropWithDataAchorSampling(BaseOperator):
+ def __init__(self,
+ batch_sampler,
+ anchor_sampler=None,
+ target_size=None,
+ das_anchor_scales=[16, 32, 64, 128],
+ sampling_prob=0.5,
+ min_size=8.,
+ avoid_no_bbox=True):
+ """
+ Args:
+ anchor_sampler (list): anchor_sampling sets of different
+ parameters for cropping.
+ batch_sampler (list): Multiple sets of different
+ parameters for cropping.
+ e.g.[[1, 10, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.2, 0.0]]
+ [[1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
+ [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
+ [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
+ [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
+ [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0]]
+ [max sample, max trial, min scale, max scale,
+ min aspect ratio, max aspect ratio,
+ min overlap, max overlap, min coverage, max coverage]
+ target_size (int): target image size.
+ das_anchor_scales (list[float]): a list of anchor scales in data
+                anchor sampling.
+ min_size (float): minimum size of sampled bbox.
+            avoid_no_bbox (bool): whether to avoid the
+                situation where the box does not appear.
+ """
+ super(CropWithDataAchorSampling, self).__init__()
+ self.anchor_sampler = anchor_sampler
+ self.batch_sampler = batch_sampler
+ self.target_size = target_size
+ self.sampling_prob = sampling_prob
+ self.min_size = min_size
+ self.avoid_no_bbox = avoid_no_bbox
+ self.das_anchor_scales = np.array(das_anchor_scales)
+
+ def apply(self, sample, context):
+ """
+ Crop the image and modify bounding box.
+ Operators:
+ 1. Scale the image width and height.
+            2. Crop the image according to a random sample.
+ 3. Rescale the bounding box.
+ 4. Determine if the new bbox is satisfied in the new image.
+ Returns:
+ sample: the image, bounding box are replaced.
+ """
+ assert 'image' in sample, "image data not found"
+ im = sample['image']
+ gt_bbox = sample['gt_bbox']
+ gt_class = sample['gt_class']
+ image_height, image_width = im.shape[:2]
+ gt_bbox[:, 0] /= image_width
+ gt_bbox[:, 1] /= image_height
+ gt_bbox[:, 2] /= image_width
+ gt_bbox[:, 3] /= image_height
+ gt_score = None
+ if 'gt_score' in sample:
+ gt_score = sample['gt_score']
+ sampled_bbox = []
+ gt_bbox = gt_bbox.tolist()
+
+ prob = np.random.uniform(0., 1.)
+ if prob > self.sampling_prob: # anchor sampling
+ assert self.anchor_sampler
+ for sampler in self.anchor_sampler:
+ found = 0
+ for i in range(sampler[1]):
+ if found >= sampler[0]:
+ break
+ sample_bbox = data_anchor_sampling(
+ gt_bbox, image_width, image_height,
+ self.das_anchor_scales, self.target_size)
+ if sample_bbox == 0:
+ break
+ if satisfy_sample_constraint_coverage(sampler, sample_bbox,
+ gt_bbox):
+ sampled_bbox.append(sample_bbox)
+ found = found + 1
+ im = np.array(im)
+ while sampled_bbox:
+ idx = int(np.random.uniform(0, len(sampled_bbox)))
+ sample_bbox = sampled_bbox.pop(idx)
+
+ if 'gt_keypoint' in sample.keys():
+ keypoints = (sample['gt_keypoint'],
+ sample['keypoint_ignore'])
+ crop_bbox, crop_class, crop_score, gt_keypoints = \
+ filter_and_process(sample_bbox, gt_bbox, gt_class,
+ scores=gt_score,
+ keypoints=keypoints)
+ else:
+ crop_bbox, crop_class, crop_score = filter_and_process(
+ sample_bbox, gt_bbox, gt_class, scores=gt_score)
+ crop_bbox, crop_class, crop_score = bbox_area_sampling(
+ crop_bbox, crop_class, crop_score, self.target_size,
+ self.min_size)
+
+ if self.avoid_no_bbox:
+ if len(crop_bbox) < 1:
+ continue
+ im = crop_image_sampling(im, sample_bbox, image_width,
+ image_height, self.target_size)
+ height, width = im.shape[:2]
+ crop_bbox[:, 0] *= width
+ crop_bbox[:, 1] *= height
+ crop_bbox[:, 2] *= width
+ crop_bbox[:, 3] *= height
+ sample['image'] = im
+ sample['gt_bbox'] = crop_bbox
+ sample['gt_class'] = crop_class
+ if 'gt_score' in sample:
+ sample['gt_score'] = crop_score
+ if 'gt_keypoint' in sample.keys():
+ sample['gt_keypoint'] = gt_keypoints[0]
+ sample['keypoint_ignore'] = gt_keypoints[1]
+ return sample
+ return sample
+
+ else:
+ for sampler in self.batch_sampler:
+ found = 0
+ for i in range(sampler[1]):
+ if found >= sampler[0]:
+ break
+ sample_bbox = generate_sample_bbox_square(
+ sampler, image_width, image_height)
+ if satisfy_sample_constraint_coverage(sampler, sample_bbox,
+ gt_bbox):
+ sampled_bbox.append(sample_bbox)
+ found = found + 1
+ im = np.array(im)
+ while sampled_bbox:
+ idx = int(np.random.uniform(0, len(sampled_bbox)))
+ sample_bbox = sampled_bbox.pop(idx)
+ sample_bbox = clip_bbox(sample_bbox)
+
+ if 'gt_keypoint' in sample.keys():
+ keypoints = (sample['gt_keypoint'],
+ sample['keypoint_ignore'])
+ crop_bbox, crop_class, crop_score, gt_keypoints = \
+ filter_and_process(sample_bbox, gt_bbox, gt_class,
+ scores=gt_score,
+ keypoints=keypoints)
+ else:
+ crop_bbox, crop_class, crop_score = filter_and_process(
+ sample_bbox, gt_bbox, gt_class, scores=gt_score)
+                # sample bboxes according to their area
+ crop_bbox, crop_class, crop_score = bbox_area_sampling(
+ crop_bbox, crop_class, crop_score, self.target_size,
+ self.min_size)
+
+ if self.avoid_no_bbox:
+ if len(crop_bbox) < 1:
+ continue
+ xmin = int(sample_bbox[0] * image_width)
+ xmax = int(sample_bbox[2] * image_width)
+ ymin = int(sample_bbox[1] * image_height)
+ ymax = int(sample_bbox[3] * image_height)
+ im = im[ymin:ymax, xmin:xmax]
+ height, width = im.shape[:2]
+ crop_bbox[:, 0] *= width
+ crop_bbox[:, 1] *= height
+ crop_bbox[:, 2] *= width
+ crop_bbox[:, 3] *= height
+ sample['image'] = im
+ sample['gt_bbox'] = crop_bbox
+ sample['gt_class'] = crop_class
+ if 'gt_score' in sample:
+ sample['gt_score'] = crop_score
+ if 'gt_keypoint' in sample.keys():
+ sample['gt_keypoint'] = gt_keypoints[0]
+ sample['keypoint_ignore'] = gt_keypoints[1]
+ return sample
+ return sample
+
+
+@register_op
+class RandomCrop(BaseOperator):
+ """Random crop image and bboxes.
+ Args:
+        aspect_ratio (list): aspect ratio of the cropped region,
+            in [min, max] format.
+        thresholds (list): iou thresholds for deciding a valid bbox crop.
+        scaling (list): ratio between a cropped region and the original image,
+            in [min, max] format.
+        num_attempts (int): number of tries before giving up.
+        allow_no_crop (bool): allow returning the sample without actually cropping.
+        cover_all_box (bool): ensure all bboxes are covered in the final crop.
+        is_mask_crop (bool): whether to crop the segmentation masks.
+ """
+
+ def __init__(self,
+ aspect_ratio=[.5, 2.],
+ thresholds=[.0, .1, .3, .5, .7, .9],
+ scaling=[.3, 1.],
+ num_attempts=50,
+ allow_no_crop=True,
+ cover_all_box=False,
+ is_mask_crop=False):
+ super(RandomCrop, self).__init__()
+ self.aspect_ratio = aspect_ratio
+ self.thresholds = thresholds
+ self.scaling = scaling
+ self.num_attempts = num_attempts
+ self.allow_no_crop = allow_no_crop
+ self.cover_all_box = cover_all_box
+ self.is_mask_crop = is_mask_crop
+
+ def crop_segms(self, segms, valid_ids, crop, height, width):
+ def _crop_poly(segm, crop):
+ xmin, ymin, xmax, ymax = crop
+ crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]
+ crop_p = np.array(crop_coord).reshape(4, 2)
+ crop_p = Polygon(crop_p)
+
+ crop_segm = list()
+ for poly in segm:
+ poly = np.array(poly).reshape(len(poly) // 2, 2)
+ polygon = Polygon(poly)
+ if not polygon.is_valid:
+ exterior = polygon.exterior
+ multi_lines = exterior.intersection(exterior)
+ polygons = shapely.ops.polygonize(multi_lines)
+ polygon = MultiPolygon(polygons)
+ multi_polygon = list()
+ if isinstance(polygon, MultiPolygon):
+ multi_polygon = copy.deepcopy(polygon)
+ else:
+ multi_polygon.append(copy.deepcopy(polygon))
+ for per_polygon in multi_polygon:
+ inter = per_polygon.intersection(crop_p)
+ if not inter:
+ continue
+ if isinstance(inter, (MultiPolygon, GeometryCollection)):
+ for part in inter:
+ if not isinstance(part, Polygon):
+ continue
+ part = np.squeeze(
+ np.array(part.exterior.coords[:-1]).reshape(1,
+ -1))
+ part[0::2] -= xmin
+ part[1::2] -= ymin
+ crop_segm.append(part.tolist())
+ elif isinstance(inter, Polygon):
+ crop_poly = np.squeeze(
+ np.array(inter.exterior.coords[:-1]).reshape(1, -1))
+ crop_poly[0::2] -= xmin
+ crop_poly[1::2] -= ymin
+ crop_segm.append(crop_poly.tolist())
+ else:
+ continue
+ return crop_segm
+
+ def _crop_rle(rle, crop, height, width):
+ if 'counts' in rle and type(rle['counts']) == list:
+ rle = mask_util.frPyObjects(rle, height, width)
+ mask = mask_util.decode(rle)
+ mask = mask[crop[1]:crop[3], crop[0]:crop[2]]
+ rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
+ return rle
+
+ crop_segms = []
+ for id in valid_ids:
+ segm = segms[id]
+ if is_poly(segm):
+ import copy
+ import shapely.ops
+ from shapely.geometry import Polygon, MultiPolygon, GeometryCollection
+ logging.getLogger("shapely").setLevel(logging.WARNING)
+ # Polygon format
+ crop_segms.append(_crop_poly(segm, crop))
+ else:
+ # RLE format
+ import pycocotools.mask as mask_util
+ crop_segms.append(_crop_rle(segm, crop, height, width))
+ return crop_segms
+
+ def apply(self, sample, context=None):
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
+ return sample
+
+ h, w = sample['image'].shape[:2]
+ gt_bbox = sample['gt_bbox']
+
+        # NOTE: The original method generates one candidate crop per
+        # threshold and then randomly samples one from the resulting list.
+        # Here a short-circuit approach is taken instead: shuffle the
+        # thresholds, attempt to find a valid crop for each, and return
+        # the first one found. The resulting probability distribution is
+        # not exactly the same; it loosely resembles the "Monty Hall"
+        # problem, in that actually carrying out the remaining attempts
+        # would change what is observed (like opening the other doors).
+ thresholds = list(self.thresholds)
+ if self.allow_no_crop:
+ thresholds.append('no_crop')
+ np.random.shuffle(thresholds)
+
+ for thresh in thresholds:
+ if thresh == 'no_crop':
+ return sample
+
+ found = False
+ for i in range(self.num_attempts):
+ scale = np.random.uniform(*self.scaling)
+ if self.aspect_ratio is not None:
+ min_ar, max_ar = self.aspect_ratio
+ aspect_ratio = np.random.uniform(
+ max(min_ar, scale**2), min(max_ar, scale**-2))
+ h_scale = scale / np.sqrt(aspect_ratio)
+ w_scale = scale * np.sqrt(aspect_ratio)
+ else:
+ h_scale = np.random.uniform(*self.scaling)
+ w_scale = np.random.uniform(*self.scaling)
+ crop_h = h * h_scale
+ crop_w = w * w_scale
+ if self.aspect_ratio is None:
+ if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0:
+ continue
+
+ crop_h = int(crop_h)
+ crop_w = int(crop_w)
+ crop_y = np.random.randint(0, h - crop_h)
+ crop_x = np.random.randint(0, w - crop_w)
+ crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
+ iou = self._iou_matrix(
+ gt_bbox, np.array(
+ [crop_box], dtype=np.float32))
+ if iou.max() < thresh:
+ continue
+
+ if self.cover_all_box and iou.min() < thresh:
+ continue
+
+ cropped_box, valid_ids = self._crop_box_with_center_constraint(
+ gt_bbox, np.array(
+ crop_box, dtype=np.float32))
+ if valid_ids.size > 0:
+ found = True
+ break
+
+ if found:
+ if self.is_mask_crop and 'gt_poly' in sample and len(sample[
+ 'gt_poly']) > 0:
+ crop_polys = self.crop_segms(
+ sample['gt_poly'],
+ valid_ids,
+ np.array(
+ crop_box, dtype=np.int64),
+ h,
+ w)
+ if [] in crop_polys:
+ delete_id = list()
+ valid_polys = list()
+ for id, crop_poly in enumerate(crop_polys):
+ if crop_poly == []:
+ delete_id.append(id)
+ else:
+ valid_polys.append(crop_poly)
+ valid_ids = np.delete(valid_ids, delete_id)
+ if len(valid_polys) == 0:
+ return sample
+ sample['gt_poly'] = valid_polys
+ else:
+ sample['gt_poly'] = crop_polys
+
+ if 'gt_segm' in sample:
+ sample['gt_segm'] = self._crop_segm(sample['gt_segm'],
+ crop_box)
+ sample['gt_segm'] = np.take(
+ sample['gt_segm'], valid_ids, axis=0)
+
+ sample['image'] = self._crop_image(sample['image'], crop_box)
+ sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
+ sample['gt_class'] = np.take(
+ sample['gt_class'], valid_ids, axis=0)
+ if 'gt_score' in sample:
+ sample['gt_score'] = np.take(
+ sample['gt_score'], valid_ids, axis=0)
+
+ if 'is_crowd' in sample:
+ sample['is_crowd'] = np.take(
+ sample['is_crowd'], valid_ids, axis=0)
+ return sample
+
+ return sample
+
+ def _iou_matrix(self, a, b):
+ tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])
+ br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
+
+ area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)
+ area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
+ area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
+ area_o = (area_a[:, np.newaxis] + area_b - area_i)
+ return area_i / (area_o + 1e-10)
+
+ def _crop_box_with_center_constraint(self, box, crop):
+ cropped_box = box.copy()
+
+ cropped_box[:, :2] = np.maximum(box[:, :2], crop[:2])
+ cropped_box[:, 2:] = np.minimum(box[:, 2:], crop[2:])
+ cropped_box[:, :2] -= crop[:2]
+ cropped_box[:, 2:] -= crop[:2]
+
+ centers = (box[:, :2] + box[:, 2:]) / 2
+ valid = np.logical_and(crop[:2] <= centers,
+ centers < crop[2:]).all(axis=1)
+ valid = np.logical_and(
+ valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1))
+
+ return cropped_box, np.where(valid)[0]
+
+ def _crop_image(self, img, crop):
+ x1, y1, x2, y2 = crop
+ return img[y1:y2, x1:x2, :]
+
+ def _crop_segm(self, segm, crop):
+ x1, y1, x2, y2 = crop
+ return segm[:, y1:y2, x1:x2]
+
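+# A minimal usage sketch for RandomCrop (hypothetical values; assumes a decoded
+# sample dict carrying 'image' as an HWC ndarray plus 'gt_bbox'/'gt_class'):
+#   crop_op = RandomCrop(num_attempts=50, allow_no_crop=True)
+#   sample = crop_op.apply(sample)  # kept boxes are re-clipped to the crop window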
+
+@register_op
+class RandomScaledCrop(BaseOperator):
+ """Resize image and bbox based on long side (with optional random scaling),
+ then crop or pad image to target size.
+ Args:
+ target_dim (int): target size.
+ scale_range (list): random scale range.
+ interp (int): interpolation method, default to `cv2.INTER_LINEAR`.
+ """
+
+ def __init__(self,
+ target_dim=512,
+ scale_range=[.1, 2.],
+ interp=cv2.INTER_LINEAR):
+ super(RandomScaledCrop, self).__init__()
+ self.target_dim = target_dim
+ self.scale_range = scale_range
+ self.interp = interp
+
+ def apply(self, sample, context=None):
+ img = sample['image']
+ h, w = img.shape[:2]
+ random_scale = np.random.uniform(*self.scale_range)
+ dim = self.target_dim
+ random_dim = int(dim * random_scale)
+ dim_max = max(h, w)
+ scale = random_dim / dim_max
+        # cv2.resize and the canvas slicing below require integer sizes
+        resize_w = int(w * scale)
+        resize_h = int(h * scale)
+ offset_x = int(max(0, np.random.uniform(0., resize_w - dim)))
+ offset_y = int(max(0, np.random.uniform(0., resize_h - dim)))
+
+ img = cv2.resize(img, (resize_w, resize_h), interpolation=self.interp)
+ img = np.array(img)
+ canvas = np.zeros((dim, dim, 3), dtype=img.dtype)
+ canvas[:min(dim, resize_h), :min(dim, resize_w), :] = img[
+ offset_y:offset_y + dim, offset_x:offset_x + dim, :]
+ sample['image'] = canvas
+ sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
+        scale_factor = sample['scale_factor']
+ sample['scale_factor'] = np.asarray(
+ [scale_factor[0] * scale, scale_factor[1] * scale],
+ dtype=np.float32)
+
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
+ scale_array = np.array([scale, scale] * 2, dtype=np.float32)
+ shift_array = np.array([offset_x, offset_y] * 2, dtype=np.float32)
+ boxes = sample['gt_bbox'] * scale_array - shift_array
+ boxes = np.clip(boxes, 0, dim - 1)
+ # filter boxes with no area
+ area = np.prod(boxes[..., 2:] - boxes[..., :2], axis=1)
+ valid = (area > 1.).nonzero()[0]
+ sample['gt_bbox'] = boxes[valid]
+ sample['gt_class'] = sample['gt_class'][valid]
+
+ return sample
+
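+# Sketch of the op above: for target_dim=512 and a random scale s, the long
+# side is first resized toward int(512 * s); a 512x512 window is then cut at a
+# random offset (zero-padded where the resized image is smaller), and
+# scale_factor is updated so later ops can map coordinates back.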
+
+@register_op
+class Cutmix(BaseOperator):
+ def __init__(self, alpha=1.5, beta=1.5):
+ """
+ CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features, see https://arxiv.org/abs/1905.04899
+        Cutmix the image and gt_bbox/gt_score of two samples
+        Args:
+            alpha (float): alpha parameter of the beta distribution
+            beta (float): beta parameter of the beta distribution
+ """
+ super(Cutmix, self).__init__()
+ self.alpha = alpha
+ self.beta = beta
+        if self.alpha <= 0.0:
+            raise ValueError("alpha should be positive in {}".format(self))
+        if self.beta <= 0.0:
+            raise ValueError("beta should be positive in {}".format(self))
+
+ def apply_image(self, img1, img2, factor):
+ """ _rand_bbox """
+ h = max(img1.shape[0], img2.shape[0])
+ w = max(img1.shape[1], img2.shape[1])
+ cut_rat = np.sqrt(1. - factor)
+
+ cut_w = np.int32(w * cut_rat)
+ cut_h = np.int32(h * cut_rat)
+
+ # uniform
+ cx = np.random.randint(w)
+ cy = np.random.randint(h)
+
+ bbx1 = np.clip(cx - cut_w // 2, 0, w - 1)
+ bby1 = np.clip(cy - cut_h // 2, 0, h - 1)
+ bbx2 = np.clip(cx + cut_w // 2, 0, w - 1)
+ bby2 = np.clip(cy + cut_h // 2, 0, h - 1)
+
+ img_1_pad = np.zeros((h, w, img1.shape[2]), 'float32')
+ img_1_pad[:img1.shape[0], :img1.shape[1], :] = \
+ img1.astype('float32')
+ img_2_pad = np.zeros((h, w, img2.shape[2]), 'float32')
+ img_2_pad[:img2.shape[0], :img2.shape[1], :] = \
+ img2.astype('float32')
+ img_1_pad[bby1:bby2, bbx1:bbx2, :] = img_2_pad[bby1:bby2, bbx1:bbx2, :]
+ return img_1_pad
+
+ def __call__(self, sample, context=None):
+ if not isinstance(sample, Sequence):
+ return sample
+
+ assert len(sample) == 2, 'cutmix need two samples'
+
+ factor = np.random.beta(self.alpha, self.beta)
+ factor = max(0.0, min(1.0, factor))
+ if factor >= 1.0:
+ return sample[0]
+ if factor <= 0.0:
+ return sample[1]
+ img1 = sample[0]['image']
+ img2 = sample[1]['image']
+ img = self.apply_image(img1, img2, factor)
+ gt_bbox1 = sample[0]['gt_bbox']
+ gt_bbox2 = sample[1]['gt_bbox']
+ gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
+ gt_class1 = sample[0]['gt_class']
+ gt_class2 = sample[1]['gt_class']
+ gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
+ gt_score1 = np.ones_like(sample[0]['gt_class'])
+ gt_score2 = np.ones_like(sample[1]['gt_class'])
+ gt_score = np.concatenate(
+ (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
+ result = copy.deepcopy(sample[0])
+ result['image'] = img
+ result['gt_bbox'] = gt_bbox
+ result['gt_score'] = gt_score
+ result['gt_class'] = gt_class
+ if 'is_crowd' in sample[0]:
+ is_crowd1 = sample[0]['is_crowd']
+ is_crowd2 = sample[1]['is_crowd']
+ is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
+ result['is_crowd'] = is_crowd
+ if 'difficult' in sample[0]:
+ is_difficult1 = sample[0]['difficult']
+ is_difficult2 = sample[1]['difficult']
+ is_difficult = np.concatenate(
+ (is_difficult1, is_difficult2), axis=0)
+ result['difficult'] = is_difficult
+ return result
+
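+# Usage sketch (assumption: a batch transform hands Cutmix a pair of decoded
+# samples rather than a single dict):
+#   cutmix = Cutmix(alpha=1.5, beta=1.5)
+#   mixed = cutmix([sample_a, sample_b])  # gt_score carries factor / 1 - factor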
+
+@register_op
+class Mixup(BaseOperator):
+ def __init__(self, alpha=1.5, beta=1.5):
+        """ Mixup the image and gt_bbox/gt_score of two samples
+        Args:
+            alpha (float): alpha parameter of the beta distribution
+            beta (float): beta parameter of the beta distribution
+ """
+ super(Mixup, self).__init__()
+ self.alpha = alpha
+ self.beta = beta
+        if self.alpha <= 0.0:
+            raise ValueError("alpha should be positive in {}".format(self))
+        if self.beta <= 0.0:
+            raise ValueError("beta should be positive in {}".format(self))
+
+ def apply_image(self, img1, img2, factor):
+ h = max(img1.shape[0], img2.shape[0])
+ w = max(img1.shape[1], img2.shape[1])
+ img = np.zeros((h, w, img1.shape[2]), 'float32')
+ img[:img1.shape[0], :img1.shape[1], :] = \
+ img1.astype('float32') * factor
+ img[:img2.shape[0], :img2.shape[1], :] += \
+ img2.astype('float32') * (1.0 - factor)
+ return img.astype('uint8')
+
+ def __call__(self, sample, context=None):
+ if not isinstance(sample, Sequence):
+ return sample
+
+ assert len(sample) == 2, 'mixup need two samples'
+
+ factor = np.random.beta(self.alpha, self.beta)
+ factor = max(0.0, min(1.0, factor))
+ if factor >= 1.0:
+ return sample[0]
+ if factor <= 0.0:
+ return sample[1]
+ im = self.apply_image(sample[0]['image'], sample[1]['image'], factor)
+ result = copy.deepcopy(sample[0])
+ result['image'] = im
+ # apply bbox and score
+ if 'gt_bbox' in sample[0]:
+ gt_bbox1 = sample[0]['gt_bbox']
+ gt_bbox2 = sample[1]['gt_bbox']
+ gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
+ result['gt_bbox'] = gt_bbox
+ if 'gt_class' in sample[0]:
+ gt_class1 = sample[0]['gt_class']
+ gt_class2 = sample[1]['gt_class']
+ gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
+ result['gt_class'] = gt_class
+
+ gt_score1 = np.ones_like(sample[0]['gt_class'])
+ gt_score2 = np.ones_like(sample[1]['gt_class'])
+ gt_score = np.concatenate(
+ (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
+ result['gt_score'] = gt_score
+ if 'is_crowd' in sample[0]:
+ is_crowd1 = sample[0]['is_crowd']
+ is_crowd2 = sample[1]['is_crowd']
+ is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
+ result['is_crowd'] = is_crowd
+ if 'difficult' in sample[0]:
+ is_difficult1 = sample[0]['difficult']
+ is_difficult2 = sample[1]['difficult']
+ is_difficult = np.concatenate(
+ (is_difficult1, is_difficult2), axis=0)
+ result['difficult'] = is_difficult
+
+ if 'gt_ide' in sample[0]:
+ gt_ide1 = sample[0]['gt_ide']
+ gt_ide2 = sample[1]['gt_ide']
+ gt_ide = np.concatenate((gt_ide1, gt_ide2), axis=0)
+ result['gt_ide'] = gt_ide
+ return result
+
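+# Unlike Cutmix above, which pastes a rectangular patch of one image onto the
+# other, Mixup blends the two whole images pixel-wise with weight `factor`;
+# both ops concatenate the boxes of the pair and weight gt_score accordingly.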
+
+@register_op
+class NormalizeBox(BaseOperator):
+    """Transform the bounding box's coordinates to [0, 1]."""
+
+ def __init__(self):
+ super(NormalizeBox, self).__init__()
+
+ def apply(self, sample, context):
+ im = sample['image']
+ gt_bbox = sample['gt_bbox']
+ height, width, _ = im.shape
+ for i in range(gt_bbox.shape[0]):
+ gt_bbox[i][0] = gt_bbox[i][0] / width
+ gt_bbox[i][1] = gt_bbox[i][1] / height
+ gt_bbox[i][2] = gt_bbox[i][2] / width
+ gt_bbox[i][3] = gt_bbox[i][3] / height
+ sample['gt_bbox'] = gt_bbox
+
+ if 'gt_keypoint' in sample.keys():
+ gt_keypoint = sample['gt_keypoint']
+
+ for i in range(gt_keypoint.shape[1]):
+ if i % 2:
+ gt_keypoint[:, i] = gt_keypoint[:, i] / height
+ else:
+ gt_keypoint[:, i] = gt_keypoint[:, i] / width
+ sample['gt_keypoint'] = gt_keypoint
+
+ return sample
+
+
+@register_op
+class BboxXYXY2XYWH(BaseOperator):
+ """
+ Convert bbox XYXY format to XYWH format.
+ """
+
+ def __init__(self):
+ super(BboxXYXY2XYWH, self).__init__()
+
+ def apply(self, sample, context=None):
+ assert 'gt_bbox' in sample
+ bbox = sample['gt_bbox']
+ bbox[:, 2:4] = bbox[:, 2:4] - bbox[:, :2]
+ bbox[:, :2] = bbox[:, :2] + bbox[:, 2:4] / 2.
+ sample['gt_bbox'] = bbox
+ return sample
+
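+# Note: despite the XYWH name, the result above is [center_x, center_y, w, h]
+# (the second step shifts the top-left corner to the box center), i.e. the
+# CXCYWH layout that BboxCXCYWH2XYXY below inverts.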
+
+@register_op
+class PadBox(BaseOperator):
+ def __init__(self, num_max_boxes=50):
+ """
+ Pad zeros to bboxes if number of bboxes is less than num_max_boxes.
+ Args:
+ num_max_boxes (int): the max number of bboxes
+ """
+ self.num_max_boxes = num_max_boxes
+ super(PadBox, self).__init__()
+
+ def apply(self, sample, context=None):
+ assert 'gt_bbox' in sample
+ bbox = sample['gt_bbox']
+ gt_num = min(self.num_max_boxes, len(bbox))
+ num_max = self.num_max_boxes
+ # fields = context['fields'] if context else []
+ pad_bbox = np.zeros((num_max, 4), dtype=np.float32)
+ if gt_num > 0:
+ pad_bbox[:gt_num, :] = bbox[:gt_num, :]
+ sample['gt_bbox'] = pad_bbox
+ if 'gt_class' in sample:
+ pad_class = np.zeros((num_max, ), dtype=np.int32)
+ if gt_num > 0:
+ pad_class[:gt_num] = sample['gt_class'][:gt_num, 0]
+ sample['gt_class'] = pad_class
+ if 'gt_score' in sample:
+ pad_score = np.zeros((num_max, ), dtype=np.float32)
+ if gt_num > 0:
+ pad_score[:gt_num] = sample['gt_score'][:gt_num, 0]
+ sample['gt_score'] = pad_score
+        # In training, ops such as ExpandImage expand gt_bbox and gt_class
+        # but not the 'difficult' field, so pad it according to its own length.
+ if 'difficult' in sample:
+ pad_diff = np.zeros((num_max, ), dtype=np.int32)
+ if gt_num > 0:
+ pad_diff[:gt_num] = sample['difficult'][:gt_num, 0]
+ sample['difficult'] = pad_diff
+ if 'is_crowd' in sample:
+ pad_crowd = np.zeros((num_max, ), dtype=np.int32)
+ if gt_num > 0:
+ pad_crowd[:gt_num] = sample['is_crowd'][:gt_num, 0]
+ sample['is_crowd'] = pad_crowd
+ if 'gt_ide' in sample:
+ pad_ide = np.zeros((num_max, ), dtype=np.int32)
+ if gt_num > 0:
+ pad_ide[:gt_num] = sample['gt_ide'][:gt_num, 0]
+ sample['gt_ide'] = pad_ide
+ return sample
+
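+# Resulting shapes for num_max_boxes=50 (a sketch, assuming the usual column
+# vectors for labels): gt_bbox becomes (50, 4) float32 and gt_class/gt_score
+# become (50,), zero-padded, so downstream batching sees fixed-size arrays.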
+
+@register_op
+class DebugVisibleImage(BaseOperator):
+ """
+    In debug mode, visualize images according to `gt_bbox`.
+    (Currently only supported when the image is not cropped or flipped.)
+ """
+
+ def __init__(self, output_dir='output/debug', is_normalized=False):
+ super(DebugVisibleImage, self).__init__()
+ self.is_normalized = is_normalized
+ self.output_dir = output_dir
+ if not os.path.isdir(output_dir):
+ os.makedirs(output_dir)
+ if not isinstance(self.is_normalized, bool):
+ raise TypeError("{}: input type is invalid.".format(self))
+
+ def apply(self, sample, context=None):
+ image = Image.fromarray(sample['image'].astype(np.uint8))
+ out_file_name = '{:012d}.jpg'.format(sample['im_id'][0])
+ width = sample['w']
+ height = sample['h']
+ gt_bbox = sample['gt_bbox']
+ gt_class = sample['gt_class']
+ draw = ImageDraw.Draw(image)
+ for i in range(gt_bbox.shape[0]):
+ if self.is_normalized:
+ gt_bbox[i][0] = gt_bbox[i][0] * width
+ gt_bbox[i][1] = gt_bbox[i][1] * height
+ gt_bbox[i][2] = gt_bbox[i][2] * width
+ gt_bbox[i][3] = gt_bbox[i][3] * height
+
+ xmin, ymin, xmax, ymax = gt_bbox[i]
+ draw.line(
+ [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
+ (xmin, ymin)],
+ width=2,
+ fill='green')
+ # draw label
+ text = str(gt_class[i][0])
+ tw, th = draw.textsize(text)
+ draw.rectangle(
+ [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill='green')
+ draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
+
+ if 'gt_keypoint' in sample.keys():
+ gt_keypoint = sample['gt_keypoint']
+ if self.is_normalized:
+ for i in range(gt_keypoint.shape[1]):
+ if i % 2:
+ gt_keypoint[:, i] = gt_keypoint[:, i] * height
+ else:
+ gt_keypoint[:, i] = gt_keypoint[:, i] * width
+ for i in range(gt_keypoint.shape[0]):
+ keypoint = gt_keypoint[i]
+ for j in range(int(keypoint.shape[0] / 2)):
+ x1 = round(keypoint[2 * j]).astype(np.int32)
+ y1 = round(keypoint[2 * j + 1]).astype(np.int32)
+ draw.ellipse(
+ (x1, y1, x1 + 5, y1 + 5), fill='green', outline='green')
+ save_path = os.path.join(self.output_dir, out_file_name)
+ image.save(save_path, quality=95)
+ return sample
+
+
+@register_op
+class Pad(BaseOperator):
+ def __init__(self,
+ size=None,
+ size_divisor=32,
+ pad_mode=0,
+ offsets=None,
+ fill_value=(127.5, 127.5, 127.5)):
+ """
+ Pad image to a specified size or multiple of size_divisor.
+ Args:
+            size (int, Sequence): image target size; if None, pad to a multiple of size_divisor. Default None.
+            size_divisor (int): size divisor. Default 32.
+            pad_mode (int): pad mode, currently only supports four modes [-1, 0, 1, 2].
+                If -1, use the specified offsets; if 0, only pad to the right and bottom;
+                if 1, pad around the center; if 2, only pad to the left and top.
+            offsets (list): [offset_x, offset_y], explicit padding offsets, only supported when pad_mode=-1.
+            fill_value (tuple): RGB value of the padded area. Default (127.5, 127.5, 127.5).
+ """
+ super(Pad, self).__init__()
+
+        if size is not None and not isinstance(size, (int, Sequence)):
+            raise TypeError(
+                "Type of size is invalid. Must be Integer or Sequence, "
+                "now is {}".format(type(size)))
+
+ if isinstance(size, int):
+ size = [size, size]
+
+ assert pad_mode in [
+ -1, 0, 1, 2
+ ], 'currently only supports four modes [-1, 0, 1, 2]'
+ if pad_mode == -1:
+ assert offsets, 'if pad_mode is -1, offsets should not be None'
+
+ self.size = size
+ self.size_divisor = size_divisor
+ self.pad_mode = pad_mode
+ self.fill_value = fill_value
+ self.offsets = offsets
+
+ def apply_segm(self, segms, offsets, im_size, size):
+ def _expand_poly(poly, x, y):
+ expanded_poly = np.array(poly)
+ expanded_poly[0::2] += x
+ expanded_poly[1::2] += y
+ return expanded_poly.tolist()
+
+ def _expand_rle(rle, x, y, height, width, h, w):
+ if 'counts' in rle and type(rle['counts']) == list:
+ rle = mask_util.frPyObjects(rle, height, width)
+ mask = mask_util.decode(rle)
+ expanded_mask = np.full((h, w), 0).astype(mask.dtype)
+ expanded_mask[y:y + height, x:x + width] = mask
+ rle = mask_util.encode(
+ np.array(
+ expanded_mask, order='F', dtype=np.uint8))
+ return rle
+
+ x, y = offsets
+ height, width = im_size
+ h, w = size
+ expanded_segms = []
+ for segm in segms:
+ if is_poly(segm):
+ # Polygon format
+ expanded_segms.append(
+ [_expand_poly(poly, x, y) for poly in segm])
+ else:
+ # RLE format
+ import pycocotools.mask as mask_util
+ expanded_segms.append(
+ _expand_rle(segm, x, y, height, width, h, w))
+ return expanded_segms
+
+ def apply_bbox(self, bbox, offsets):
+ return bbox + np.array(offsets * 2, dtype=np.float32)
+
+ def apply_keypoint(self, keypoints, offsets):
+ n = len(keypoints[0]) // 2
+ return keypoints + np.array(offsets * n, dtype=np.float32)
+
+ def apply_image(self, image, offsets, im_size, size):
+ x, y = offsets
+ im_h, im_w = im_size
+ h, w = size
+ canvas = np.ones((h, w, 3), dtype=np.float32)
+ canvas *= np.array(self.fill_value, dtype=np.float32)
+ canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)
+ return canvas
+
+ def apply(self, sample, context=None):
+ im = sample['image']
+ im_h, im_w = im.shape[:2]
+ if self.size:
+ h, w = self.size
+ assert (
+ im_h < h and im_w < w
+ ), '(h, w) of target size should be greater than (im_h, im_w)'
+ else:
+            # np.ceil returns a float; the canvas shape below needs ints
+            h = int(np.ceil(im_h / self.size_divisor) * self.size_divisor)
+            w = int(np.ceil(im_w / self.size_divisor) * self.size_divisor)
+
+ if h == im_h and w == im_w:
+ return sample
+
+ if self.pad_mode == -1:
+ offset_x, offset_y = self.offsets
+ elif self.pad_mode == 0:
+ offset_y, offset_x = 0, 0
+ elif self.pad_mode == 1:
+ offset_y, offset_x = (h - im_h) // 2, (w - im_w) // 2
+ else:
+ offset_y, offset_x = h - im_h, w - im_w
+
+ offsets, im_size, size = [offset_x, offset_y], [im_h, im_w], [h, w]
+
+ sample['image'] = self.apply_image(im, offsets, im_size, size)
+
+ if self.pad_mode == 0:
+ return sample
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
+ sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], offsets)
+
+ if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
+ sample['gt_poly'] = self.apply_segm(sample['gt_poly'], offsets,
+ im_size, size)
+
+ if 'gt_keypoint' in sample and len(sample['gt_keypoint']) > 0:
+ sample['gt_keypoint'] = self.apply_keypoint(sample['gt_keypoint'],
+ offsets)
+
+ return sample
+
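+# Usage sketch (hypothetical values): pad every image so both sides are
+# multiples of 32, padding only to the right and bottom:
+#   pad_op = Pad(size=None, size_divisor=32, pad_mode=0)
+#   sample = pad_op.apply(sample)  # pad_mode=0 leaves boxes untouched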
+
+@register_op
+class Poly2Mask(BaseOperator):
+ """
+    Convert gt_poly annotations to gt_segm binary masks.
+ """
+
+ def __init__(self):
+ super(Poly2Mask, self).__init__()
+ import pycocotools.mask as maskUtils
+ self.maskutils = maskUtils
+
+ def _poly2mask(self, mask_ann, img_h, img_w):
+ if isinstance(mask_ann, list):
+ # polygon -- a single object might consist of multiple parts
+ # we merge all parts into one mask rle code
+ rles = self.maskutils.frPyObjects(mask_ann, img_h, img_w)
+ rle = self.maskutils.merge(rles)
+ elif isinstance(mask_ann['counts'], list):
+ # uncompressed RLE
+ rle = self.maskutils.frPyObjects(mask_ann, img_h, img_w)
+ else:
+ # rle
+ rle = mask_ann
+ mask = self.maskutils.decode(rle)
+ return mask
+
+ def apply(self, sample, context=None):
+ assert 'gt_poly' in sample
+ im_h = sample['h']
+ im_w = sample['w']
+ masks = [
+ self._poly2mask(gt_poly, im_h, im_w)
+ for gt_poly in sample['gt_poly']
+ ]
+ sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
+ return sample
+
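+# Sketch: each polygon (or RLE) annotation is decoded to an (h, w) uint8 mask
+# via pycocotools, so sample['gt_segm'] ends up with shape
+# (num_instances, h, w); the sample must still carry its original 'h'/'w' keys.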
+
+@register_op
+class Rbox2Poly(BaseOperator):
+ """
+ Convert rbbox format to poly format.
+ """
+
+ def __init__(self):
+ super(Rbox2Poly, self).__init__()
+
+ def apply(self, sample, context=None):
+ assert 'gt_rbox' in sample
+ assert sample['gt_rbox'].shape[1] == 5
+ rrects = sample['gt_rbox']
+ x_ctr = rrects[:, 0]
+ y_ctr = rrects[:, 1]
+ width = rrects[:, 2]
+ height = rrects[:, 3]
+ x1 = x_ctr - width / 2.0
+ y1 = y_ctr - height / 2.0
+ x2 = x_ctr + width / 2.0
+ y2 = y_ctr + height / 2.0
+ sample['gt_bbox'] = np.stack([x1, y1, x2, y2], axis=1)
+ polys = bbox_utils.rbox2poly_np(rrects)
+ sample['gt_rbox2poly'] = polys
+ return sample
+
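+# Note: gt_rbox rows are [cx, cy, w, h, angle]; the axis-aligned gt_bbox
+# derived above ignores the angle, while bbox_utils.rbox2poly_np produces the
+# full 8-value polygon stored in gt_rbox2poly.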
+
+@register_op
+class AugmentHSV(BaseOperator):
+ def __init__(self, fraction=0.50, is_bgr=True):
+ """
+        Augment the S and V channels of the image in HSV color space.
+        Args:
+            fraction (float): the fraction for augmentation. Default: 0.5.
+            is_bgr (bool): whether the image is in BGR mode. Default: True.
+ """
+ super(AugmentHSV, self).__init__()
+ self.fraction = fraction
+ self.is_bgr = is_bgr
+
+ def apply(self, sample, context=None):
+ img = sample['image']
+ if self.is_bgr:
+ img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
+ else:
+ img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
+ S = img_hsv[:, :, 1].astype(np.float32)
+ V = img_hsv[:, :, 2].astype(np.float32)
+
+ a = (random.random() * 2 - 1) * self.fraction + 1
+ S *= a
+ if a > 1:
+ np.clip(S, a_min=0, a_max=255, out=S)
+
+ a = (random.random() * 2 - 1) * self.fraction + 1
+ V *= a
+ if a > 1:
+ np.clip(V, a_min=0, a_max=255, out=V)
+
+ img_hsv[:, :, 1] = S.astype(np.uint8)
+ img_hsv[:, :, 2] = V.astype(np.uint8)
+ if self.is_bgr:
+ cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
+ else:
+ cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB, dst=img)
+
+ sample['image'] = img
+ return sample
+
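+# The op above scales S and V by independent factors drawn from
+# Uniform(1 - fraction, 1 + fraction); clipping is only needed when a factor
+# exceeds 1, since the lower bound stays non-negative for fraction <= 1.
+# Minimal sketch:
+#   sample = AugmentHSV(fraction=0.5, is_bgr=True).apply(sample)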
+
+@register_op
+class Norm2PixelBbox(BaseOperator):
+ """
+    Transform the bounding box's coordinates from [0, 1] to pixels.
+ """
+
+ def __init__(self):
+ super(Norm2PixelBbox, self).__init__()
+
+ def apply(self, sample, context=None):
+ assert 'gt_bbox' in sample
+ bbox = sample['gt_bbox']
+ height, width = sample['image'].shape[:2]
+ bbox[:, 0::2] = bbox[:, 0::2] * width
+ bbox[:, 1::2] = bbox[:, 1::2] * height
+ sample['gt_bbox'] = bbox
+ return sample
+
+
+@register_op
+class BboxCXCYWH2XYXY(BaseOperator):
+ """
+ Convert bbox CXCYWH format to XYXY format.
+ [center_x, center_y, width, height] -> [x0, y0, x1, y1]
+ """
+
+ def __init__(self):
+ super(BboxCXCYWH2XYXY, self).__init__()
+
+ def apply(self, sample, context=None):
+ assert 'gt_bbox' in sample
+ bbox0 = sample['gt_bbox']
+ bbox = bbox0.copy()
+
+ bbox[:, :2] = bbox0[:, :2] - bbox0[:, 2:4] / 2.
+ bbox[:, 2:4] = bbox0[:, :2] + bbox0[:, 2:4] / 2.
+ sample['gt_bbox'] = bbox
+ return sample
+
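+# Worked example for the op above: [cx, cy, w, h] = [50, 50, 20, 10] maps to
+# [x0, y0, x1, y1] = [40, 45, 60, 55].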
+
+@register_op
+class RandomResizeCrop(BaseOperator):
+ """Random resize and crop image and bboxes.
+ Args:
+        resizes (list): resize the image to one of `resizes`. If keep_ratio is True and mode is
+            'long', resize the image's long side to the maximum of the target size; if keep_ratio
+            is True and mode is 'short', resize the image's short side to the minimum of the target size.
+        cropsizes (list): crop sizes after resize, [(min_crop_1, max_crop_1), ...]
+        mode (str): resize mode, `long` or `short`. See `resizes` for details.
+ prob (float): probability of this op.
+ keep_ratio (bool): whether keep_ratio or not, default true
+ interp (int): the interpolation method
+        thresholds (list): iou thresholds for deciding a valid bbox crop.
+        num_attempts (int): number of tries before giving up.
+        allow_no_crop (bool): allow returning the sample without actually cropping.
+        cover_all_box (bool): ensure all bboxes are covered in the final crop.
+        is_mask_crop (bool): whether to crop the segmentation masks.
+ """
+
+ def __init__(
+ self,
+ resizes,
+ cropsizes,
+ prob=0.5,
+ mode='short',
+ keep_ratio=True,
+ interp=cv2.INTER_LINEAR,
+ num_attempts=3,
+ cover_all_box=False,
+ allow_no_crop=False,
+ thresholds=[0.3, 0.5, 0.7],
+ is_mask_crop=False, ):
+ super(RandomResizeCrop, self).__init__()
+
+ self.resizes = resizes
+ self.cropsizes = cropsizes
+ self.prob = prob
+ self.mode = mode
+
+ self.resizer = Resize(0, keep_ratio=keep_ratio, interp=interp)
+ self.croper = RandomCrop(
+ num_attempts=num_attempts,
+ cover_all_box=cover_all_box,
+ thresholds=thresholds,
+ allow_no_crop=allow_no_crop,
+ is_mask_crop=is_mask_crop)
+
+ def _format_size(self, size):
+ if isinstance(size, Integral):
+ size = (size, size)
+ return size
+
+ def apply(self, sample, context=None):
+ if random.random() < self.prob:
+ _resize = self._format_size(random.choice(self.resizes))
+ _cropsize = self._format_size(random.choice(self.cropsizes))
+ sample = self._resize(
+ self.resizer,
+ sample,
+ size=_resize,
+ mode=self.mode,
+ context=context)
+ sample = self._random_crop(
+ self.croper, sample, size=_cropsize, context=context)
+ return sample
+
+ @staticmethod
+ def _random_crop(croper, sample, size, context=None):
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
+ return sample
+
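+        # reuse RandomCrop's helper methods by binding 'self' to the croper
+        # instance passed in, since this is a @staticmethod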
+ self = croper
+ h, w = sample['image'].shape[:2]
+ gt_bbox = sample['gt_bbox']
+ cropsize = size
+ min_crop = min(cropsize)
+ max_crop = max(cropsize)
+
+ thresholds = list(self.thresholds)
+ np.random.shuffle(thresholds)
+
+ for thresh in thresholds:
+ found = False
+ for _ in range(self.num_attempts):
+
+ crop_h = random.randint(min_crop, min(h, max_crop))
+ crop_w = random.randint(min_crop, min(w, max_crop))
+
+ crop_y = random.randint(0, h - crop_h)
+ crop_x = random.randint(0, w - crop_w)
+
+ crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
+ iou = self._iou_matrix(
+ gt_bbox, np.array(
+ [crop_box], dtype=np.float32))
+ if iou.max() < thresh:
+ continue
+
+ if self.cover_all_box and iou.min() < thresh:
+ continue
+
+ cropped_box, valid_ids = self._crop_box_with_center_constraint(
+ gt_bbox, np.array(
+ crop_box, dtype=np.float32))
+ if valid_ids.size > 0:
+ found = True
+ break
+
+ if found:
+ if self.is_mask_crop and 'gt_poly' in sample and len(sample[
+ 'gt_poly']) > 0:
+ crop_polys = self.crop_segms(
+ sample['gt_poly'],
+ valid_ids,
+ np.array(
+ crop_box, dtype=np.int64),
+ h,
+ w)
+ if [] in crop_polys:
+ delete_id = list()
+ valid_polys = list()
+ for id, crop_poly in enumerate(crop_polys):
+ if crop_poly == []:
+ delete_id.append(id)
+ else:
+ valid_polys.append(crop_poly)
+ valid_ids = np.delete(valid_ids, delete_id)
+ if len(valid_polys) == 0:
+ return sample
+ sample['gt_poly'] = valid_polys
+ else:
+ sample['gt_poly'] = crop_polys
+
+ if 'gt_segm' in sample:
+ sample['gt_segm'] = self._crop_segm(sample['gt_segm'],
+ crop_box)
+ sample['gt_segm'] = np.take(
+ sample['gt_segm'], valid_ids, axis=0)
+
+ sample['image'] = self._crop_image(sample['image'], crop_box)
+ sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
+ sample['gt_class'] = np.take(
+ sample['gt_class'], valid_ids, axis=0)
+ if 'gt_score' in sample:
+ sample['gt_score'] = np.take(
+ sample['gt_score'], valid_ids, axis=0)
+
+ if 'is_crowd' in sample:
+ sample['is_crowd'] = np.take(
+ sample['is_crowd'], valid_ids, axis=0)
+ return sample
+
+ return sample
+
+ @staticmethod
+ def _resize(resizer, sample, size, mode='short', context=None):
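+        # reuse Resize's helper methods by binding 'self' to the resizer
+        # instance passed in, since this is a @staticmethod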
+ self = resizer
+ im = sample['image']
+ target_size = size
+
+ if not isinstance(im, np.ndarray):
+ raise TypeError("{}: image type is not numpy.".format(self))
+ if len(im.shape) != 3:
+ raise ImageError('{}: image is not 3-dimensional.'.format(self))
+
+ # apply image
+ im_shape = im.shape
+ if self.keep_ratio:
+
+ im_size_min = np.min(im_shape[0:2])
+ im_size_max = np.max(im_shape[0:2])
+
+ target_size_min = np.min(target_size)
+ target_size_max = np.max(target_size)
+
+ if mode == 'long':
+ im_scale = min(target_size_min / im_size_min,
+ target_size_max / im_size_max)
+ else:
+ im_scale = max(target_size_min / im_size_min,
+ target_size_max / im_size_max)
+
+ resize_h = im_scale * float(im_shape[0])
+ resize_w = im_scale * float(im_shape[1])
+
+ im_scale_x = im_scale
+ im_scale_y = im_scale
+ else:
+ resize_h, resize_w = target_size
+ im_scale_y = resize_h / im_shape[0]
+ im_scale_x = resize_w / im_shape[1]
+
+ im = self.apply_image(sample['image'], [im_scale_x, im_scale_y])
+ sample['image'] = im
+ sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
+ if 'scale_factor' in sample:
+ scale_factor = sample['scale_factor']
+ sample['scale_factor'] = np.asarray(
+ [scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
+ dtype=np.float32)
+ else:
+ sample['scale_factor'] = np.asarray(
+ [im_scale_y, im_scale_x], dtype=np.float32)
+
+ # apply bbox
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
+ sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'],
+ [im_scale_x, im_scale_y],
+ [resize_w, resize_h])
+
+ # apply rbox
+ if 'gt_rbox2poly' in sample:
+ if np.array(sample['gt_rbox2poly']).shape[1] != 8:
+                logger.warning(
+                    "gt_rbox2poly's length should be 8, but actually is {}".
+                    format(len(sample['gt_rbox2poly'])))
+ sample['gt_rbox2poly'] = self.apply_bbox(sample['gt_rbox2poly'],
+ [im_scale_x, im_scale_y],
+ [resize_w, resize_h])
+
+ # apply polygon
+ if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
+ sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_shape[:2],
+ [im_scale_x, im_scale_y])
+
+ # apply semantic
+ if 'semantic' in sample and sample['semantic']:
+ semantic = sample['semantic']
+ semantic = cv2.resize(
+ semantic.astype('float32'),
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=self.interp)
+ semantic = np.asarray(semantic).astype('int32')
+ semantic = np.expand_dims(semantic, 0)
+ sample['semantic'] = semantic
+
+ # apply gt_segm
+ if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
+ masks = [
+ cv2.resize(
+ gt_segm,
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=cv2.INTER_NEAREST)
+ for gt_segm in sample['gt_segm']
+ ]
+ sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
+
+ return sample
+
+
+@register_op
+class RandomSelect(BaseOperator):
+ """
+ Randomly choose a transformation between transforms1 and transforms2,
+ and the probability of choosing transforms1 is p.
+
+ The code is based on https://github.com/facebookresearch/detr/blob/main/datasets/transforms.py
+
+ """
+
+ def __init__(self, transforms1, transforms2, p=0.5):
+ super(RandomSelect, self).__init__()
+ self.transforms1 = Compose(transforms1)
+ self.transforms2 = Compose(transforms2)
+ self.p = p
+
+ def apply(self, sample, context=None):
+ if random.random() < self.p:
+ return self.transforms1(sample)
+ return self.transforms2(sample)
+
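+# Usage sketch (a DETR-style policy; the op configs below are hypothetical
+# and assume Compose accepts the usual list-of-dict operator specs):
+#   select = RandomSelect(
+#       transforms1=[{'RandomShortSideResize': {'short_side_sizes': [480, 512]}}],
+#       transforms2=[{'RandomSizeCrop': {'min_size': 384, 'max_size': 600}}],
+#       p=0.5)
+#   sample = select.apply(sample)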
+
+@register_op
+class RandomShortSideResize(BaseOperator):
+ def __init__(self,
+ short_side_sizes,
+ max_size=None,
+ interp=cv2.INTER_LINEAR,
+ random_interp=False):
+ """
+        Resize the image randomly according to the short side. If max_size is not None,
+        the long side is capped at max_size. The whole process keeps the aspect ratio.
+ Args:
+ short_side_sizes (list|tuple): Image target short side size.
+ max_size (int): The size of the longest side of image after resize.
+ interp (int): The interpolation method.
+            random_interp (bool): whether to randomly select the interpolation method.
+ """
+ super(RandomShortSideResize, self).__init__()
+
+ assert isinstance(short_side_sizes,
+ Sequence), "short_side_sizes must be List or Tuple"
+
+ self.short_side_sizes = short_side_sizes
+ self.max_size = max_size
+ self.interp = interp
+ self.random_interp = random_interp
+ self.interps = [
+ cv2.INTER_NEAREST,
+ cv2.INTER_LINEAR,
+ cv2.INTER_AREA,
+ cv2.INTER_CUBIC,
+ cv2.INTER_LANCZOS4,
+ ]
+
+ def get_size_with_aspect_ratio(self, image_shape, size, max_size=None):
+ h, w = image_shape
+ if max_size is not None:
+ min_original_size = float(min((w, h)))
+ max_original_size = float(max((w, h)))
+ if max_original_size / min_original_size * size > max_size:
+ size = int(
+ round(max_size * min_original_size / max_original_size))
+
+ if (w <= h and w == size) or (h <= w and h == size):
+ return (w, h)
+
+ if w < h:
+ ow = size
+ oh = int(size * h / w)
+ else:
+ oh = size
+ ow = int(size * w / h)
+
+ return (ow, oh)
+
+ def resize(self,
+ sample,
+ target_size,
+ max_size=None,
+ interp=cv2.INTER_LINEAR):
+ im = sample['image']
+ if not isinstance(im, np.ndarray):
+ raise TypeError("{}: image type is not numpy.".format(self))
+ if len(im.shape) != 3:
+ raise ImageError('{}: image is not 3-dimensional.'.format(self))
+
+ target_size = self.get_size_with_aspect_ratio(im.shape[:2], target_size,
+ max_size)
+ im_scale_y, im_scale_x = target_size[1] / im.shape[0], target_size[
+ 0] / im.shape[1]
+
+ sample['image'] = cv2.resize(im, target_size, interpolation=interp)
+ sample['im_shape'] = np.asarray(target_size[::-1], dtype=np.float32)
+ if 'scale_factor' in sample:
+ scale_factor = sample['scale_factor']
+ sample['scale_factor'] = np.asarray(
+ [scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
+ dtype=np.float32)
+ else:
+ sample['scale_factor'] = np.asarray(
+ [im_scale_y, im_scale_x], dtype=np.float32)
+
+ # apply bbox
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
+ sample['gt_bbox'] = self.apply_bbox(
+ sample['gt_bbox'], [im_scale_x, im_scale_y], target_size)
+ # apply polygon
+ if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
+ sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im.shape[:2],
+ [im_scale_x, im_scale_y])
+ # apply semantic
+ if 'semantic' in sample and sample['semantic']:
+ semantic = sample['semantic']
+ semantic = cv2.resize(
+ semantic.astype('float32'),
+ target_size,
+ interpolation=self.interp)
+ semantic = np.asarray(semantic).astype('int32')
+ semantic = np.expand_dims(semantic, 0)
+ sample['semantic'] = semantic
+ # apply gt_segm
+ if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
+ masks = [
+ cv2.resize(
+ gt_segm, target_size, interpolation=cv2.INTER_NEAREST)
+ for gt_segm in sample['gt_segm']
+ ]
+ sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
+ return sample
+
+ def apply_bbox(self, bbox, scale, size):
+ im_scale_x, im_scale_y = scale
+ resize_w, resize_h = size
+ bbox[:, 0::2] *= im_scale_x
+ bbox[:, 1::2] *= im_scale_y
+ bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, resize_w)
+ bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, resize_h)
+ return bbox.astype('float32')
+
+ def apply_segm(self, segms, im_size, scale):
+ def _resize_poly(poly, im_scale_x, im_scale_y):
+ resized_poly = np.array(poly).astype('float32')
+ resized_poly[0::2] *= im_scale_x
+ resized_poly[1::2] *= im_scale_y
+ return resized_poly.tolist()
+
+ def _resize_rle(rle, im_h, im_w, im_scale_x, im_scale_y):
+ if 'counts' in rle and type(rle['counts']) == list:
+ rle = mask_util.frPyObjects(rle, im_h, im_w)
+
+ mask = mask_util.decode(rle)
+ mask = cv2.resize(
+ mask,
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=self.interp)
+ rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
+ return rle
+
+ im_h, im_w = im_size
+ im_scale_x, im_scale_y = scale
+ resized_segms = []
+ for segm in segms:
+ if is_poly(segm):
+ # Polygon format
+ resized_segms.append([
+ _resize_poly(poly, im_scale_x, im_scale_y) for poly in segm
+ ])
+ else:
+ # RLE format
+ import pycocotools.mask as mask_util
+ resized_segms.append(
+ _resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))
+
+ return resized_segms
+
+ def apply(self, sample, context=None):
+ target_size = random.choice(self.short_side_sizes)
+ interp = random.choice(
+ self.interps) if self.random_interp else self.interp
+
+ return self.resize(sample, target_size, self.max_size, interp)
+
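+# Worked example for get_size_with_aspect_ratio: a 600x1000 (h x w) image with
+# target short side 544 and max_size=800 would need a ~907-pixel long side, so
+# the short side is recomputed as round(800 * 600 / 1000) = 480, giving
+# (ow, oh) = (800, 480).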
+
+@register_op
+class RandomSizeCrop(BaseOperator):
+ """
+    Randomly crop the image according to `min_size` and `max_size`.
+ """
+
+ def __init__(self, min_size, max_size):
+ super(RandomSizeCrop, self).__init__()
+ self.min_size = min_size
+ self.max_size = max_size
+
+ from paddle.vision.transforms.functional import crop as paddle_crop
+ self.paddle_crop = paddle_crop
+
+ @staticmethod
+ def get_crop_params(img_shape, output_size):
+ """Get parameters for ``crop`` for a random crop.
+ Args:
+ img_shape (list|tuple): Image's height and width.
+ output_size (list|tuple): Expected output size of the crop.
+ Returns:
+ tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
+ """
+ h, w = img_shape
+ th, tw = output_size
+
+        if h + 1 < th or w + 1 < tw:
+            raise ValueError(
+                "Required crop size {} is larger than input image size {}".
+                format((th, tw), (h, w)))
+
+        if w == tw and h == th:
+            return 0, 0, h, w
+
+        # random.randint is inclusive on both ends, so the upper bound is
+        # h - th (not h - th + 1) to keep the crop inside the image
+        i = random.randint(0, h - th)
+        j = random.randint(0, w - tw)
+ return i, j, th, tw
+
+ def crop(self, sample, region):
+ image_shape = sample['image'].shape[:2]
+ sample['image'] = self.paddle_crop(sample['image'], *region)
+
+ keep_index = None
+ # apply bbox
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
+ sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], region)
+ bbox = sample['gt_bbox'].reshape([-1, 2, 2])
+ area = (bbox[:, 1, :] - bbox[:, 0, :]).prod(axis=1)
+ keep_index = np.where(area > 0)[0]
+ sample['gt_bbox'] = sample['gt_bbox'][keep_index] if len(
+ keep_index) > 0 else np.zeros(
+ [0, 4], dtype=np.float32)
+ sample['gt_class'] = sample['gt_class'][keep_index] if len(
+ keep_index) > 0 else np.zeros(
+ [0, 1], dtype=np.float32)
+ if 'gt_score' in sample:
+ sample['gt_score'] = sample['gt_score'][keep_index] if len(
+ keep_index) > 0 else np.zeros(
+ [0, 1], dtype=np.float32)
+ if 'is_crowd' in sample:
+ sample['is_crowd'] = sample['is_crowd'][keep_index] if len(
+ keep_index) > 0 else np.zeros(
+ [0, 1], dtype=np.float32)
+
+ # apply polygon
+ if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
+ sample['gt_poly'] = self.apply_segm(sample['gt_poly'], region,
+ image_shape)
+ if keep_index is not None:
+ sample['gt_poly'] = sample['gt_poly'][keep_index]
+ # apply gt_segm
+ if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
+ i, j, h, w = region
+ sample['gt_segm'] = sample['gt_segm'][:, i:i + h, j:j + w]
+ if keep_index is not None:
+ sample['gt_segm'] = sample['gt_segm'][keep_index]
+
+ return sample
+
+ def apply_bbox(self, bbox, region):
+ i, j, h, w = region
+ region_size = np.asarray([w, h])
+ crop_bbox = bbox - np.asarray([j, i, j, i])
+ crop_bbox = np.minimum(crop_bbox.reshape([-1, 2, 2]), region_size)
+ crop_bbox = crop_bbox.clip(min=0)
+ return crop_bbox.reshape([-1, 4]).astype('float32')
+
+ def apply_segm(self, segms, region, image_shape):
+ def _crop_poly(segm, crop):
+ xmin, ymin, xmax, ymax = crop
+ crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]
+ crop_p = np.array(crop_coord).reshape(4, 2)
+ crop_p = Polygon(crop_p)
+
+ crop_segm = list()
+ for poly in segm:
+ poly = np.array(poly).reshape(len(poly) // 2, 2)
+ polygon = Polygon(poly)
+ if not polygon.is_valid:
+ exterior = polygon.exterior
+ multi_lines = exterior.intersection(exterior)
+ polygons = shapely.ops.polygonize(multi_lines)
+ polygon = MultiPolygon(polygons)
+ multi_polygon = list()
+ if isinstance(polygon, MultiPolygon):
+ multi_polygon = copy.deepcopy(polygon)
+ else:
+ multi_polygon.append(copy.deepcopy(polygon))
+ for per_polygon in multi_polygon:
+ inter = per_polygon.intersection(crop_p)
+ if not inter:
+ continue
+ if isinstance(inter, (MultiPolygon, GeometryCollection)):
+ for part in inter:
+ if not isinstance(part, Polygon):
+ continue
+ part = np.squeeze(
+ np.array(part.exterior.coords[:-1]).reshape(1,
+ -1))
+ part[0::2] -= xmin
+ part[1::2] -= ymin
+ crop_segm.append(part.tolist())
+ elif isinstance(inter, Polygon):
+ crop_poly = np.squeeze(
+ np.array(inter.exterior.coords[:-1]).reshape(1, -1))
+ crop_poly[0::2] -= xmin
+ crop_poly[1::2] -= ymin
+ crop_segm.append(crop_poly.tolist())
+ else:
+ continue
+ return crop_segm
+
+ def _crop_rle(rle, crop, height, width):
+ if 'counts' in rle and type(rle['counts']) == list:
+ rle = mask_util.frPyObjects(rle, height, width)
+ mask = mask_util.decode(rle)
+ mask = mask[crop[1]:crop[3], crop[0]:crop[2]]
+ rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
+ return rle
+
+ i, j, h, w = region
+ crop = [j, i, j + w, i + h]
+ height, width = image_shape
+ crop_segms = []
+ for segm in segms:
+ if is_poly(segm):
+ import copy
+ import shapely.ops
+ from shapely.geometry import Polygon, MultiPolygon, GeometryCollection
+ # Polygon format
+ crop_segms.append(_crop_poly(segm, crop))
+ else:
+ # RLE format
+ import pycocotools.mask as mask_util
+ crop_segms.append(_crop_rle(segm, crop, height, width))
+ return crop_segms
+
+ def apply(self, sample, context=None):
+ h = random.randint(self.min_size,
+ min(sample['image'].shape[0], self.max_size))
+ w = random.randint(self.min_size,
+ min(sample['image'].shape[1], self.max_size))
+
+ region = self.get_crop_params(sample['image'].shape[:2], [h, w])
+ return self.crop(sample, region)
+
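+# Usage sketch (hypothetical bounds): RandomSizeCrop(384, 600) draws a crop
+# height/width in [384, min(image_side, 600)] and drops boxes whose clipped
+# area becomes zero.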
+
+@register_op
+class WarpAffine(BaseOperator):
+ def __init__(self,
+ keep_res=False,
+ pad=31,
+ input_h=512,
+ input_w=512,
+ scale=0.4,
+ shift=0.1):
+        """WarpAffine
+        Warp affine the image
+
+        The code is based on https://github.com/xingyizhou/CenterNet/blob/master/src/lib/datasets/sample/ctdet.py
+        """
+ super(WarpAffine, self).__init__()
+ self.keep_res = keep_res
+ self.pad = pad
+ self.input_h = input_h
+ self.input_w = input_w
+ self.scale = scale
+ self.shift = shift
+
+ def apply(self, sample, context=None):
+ img = sample['image']
+ img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
+ return sample
+
+ h, w = img.shape[:2]
+
+ if self.keep_res:
+ input_h = (h | self.pad) + 1
+ input_w = (w | self.pad) + 1
+ s = np.array([input_w, input_h], dtype=np.float32)
+ c = np.array([w // 2, h // 2], dtype=np.float32)
+
+ else:
+ s = max(h, w) * 1.0
+ input_h, input_w = self.input_h, self.input_w
+ c = np.array([w / 2., h / 2.], dtype=np.float32)
+
+ trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
+ img = cv2.resize(img, (w, h))
+ inp = cv2.warpAffine(
+ img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
+ sample['image'] = inp
+ return sample
+
+
+@register_op
+class FlipWarpAffine(BaseOperator):
+ def __init__(self,
+ keep_res=False,
+ pad=31,
+ input_h=512,
+ input_w=512,
+ not_rand_crop=False,
+ scale=0.4,
+ shift=0.1,
+ flip=0.5,
+ is_scale=True,
+ use_random=True):
+        """FlipWarpAffine
+        1. Random crop
+        2. Flip the image horizontally
+        3. Warp affine the image
+        """
+ super(FlipWarpAffine, self).__init__()
+ self.keep_res = keep_res
+ self.pad = pad
+ self.input_h = input_h
+ self.input_w = input_w
+ self.not_rand_crop = not_rand_crop
+ self.scale = scale
+ self.shift = shift
+ self.flip = flip
+ self.is_scale = is_scale
+ self.use_random = use_random
+
+ def apply(self, sample, context=None):
+ img = sample['image']
+ img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+ if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
+ return sample
+
+ h, w = img.shape[:2]
+
+ if self.keep_res:
+ input_h = (h | self.pad) + 1
+ input_w = (w | self.pad) + 1
+ s = np.array([input_w, input_h], dtype=np.float32)
+ c = np.array([w // 2, h // 2], dtype=np.float32)
+
+ else:
+ s = max(h, w) * 1.0
+ input_h, input_w = self.input_h, self.input_w
+ c = np.array([w / 2., h / 2.], dtype=np.float32)
+
+ if self.use_random:
+ gt_bbox = sample['gt_bbox']
+ if not self.not_rand_crop:
+ s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
+ w_border = get_border(128, w)
+ h_border = get_border(128, h)
+ c[0] = np.random.randint(low=w_border, high=w - w_border)
+ c[1] = np.random.randint(low=h_border, high=h - h_border)
+ else:
+ sf = self.scale
+ cf = self.shift
+ c[0] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
+ c[1] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
+ s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
+
+ if np.random.random() < self.flip:
+ img = img[:, ::-1, :]
+ c[0] = w - c[0] - 1
+ oldx1 = gt_bbox[:, 0].copy()
+ oldx2 = gt_bbox[:, 2].copy()
+ gt_bbox[:, 0] = w - oldx2 - 1
+ gt_bbox[:, 2] = w - oldx1 - 1
+ sample['gt_bbox'] = gt_bbox
+
+ trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
+ if not self.use_random:
+ img = cv2.resize(img, (w, h))
+ inp = cv2.warpAffine(
+ img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
+ if self.is_scale:
+ inp = (inp.astype(np.float32) / 255.)
+ sample['image'] = inp
+ sample['center'] = c
+ sample['scale'] = s
+ return sample
+
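+# Note on keep_res in the two warp-affine ops above: (h | pad) + 1 with
+# pad=31 rounds each side up to the next multiple of 32, matching the
+# downsampling stride CenterNet expects.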
+
+@register_op
+class CenterRandColor(BaseOperator):
+ """Random color for CenterNet series models.
+ Args:
+ saturation (float): saturation settings.
+ contrast (float): contrast settings.
+ brightness (float): brightness settings.
+ """
+
+ def __init__(self, saturation=0.4, contrast=0.4, brightness=0.4):
+ super(CenterRandColor, self).__init__()
+ self.saturation = saturation
+ self.contrast = contrast
+ self.brightness = brightness
+
+ def apply_saturation(self, img, img_gray):
+ alpha = 1. + np.random.uniform(
+ low=-self.saturation, high=self.saturation)
+ self._blend(alpha, img, img_gray[:, :, None])
+ return img
+
+ def apply_contrast(self, img, img_gray):
+ alpha = 1. + np.random.uniform(low=-self.contrast, high=self.contrast)
+ img_mean = img_gray.mean()
+ self._blend(alpha, img, img_mean)
+ return img
+
+ def apply_brightness(self, img, img_gray):
+ alpha = 1 + np.random.uniform(
+ low=-self.brightness, high=self.brightness)
+ img *= alpha
+ return img
+
+ def _blend(self, alpha, img, img_mean):
+ img *= alpha
+ img_mean *= (1 - alpha)
+ img += img_mean
+
+ def __call__(self, sample, context=None):
+ img = sample['image']
+ img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ functions = [
+ self.apply_brightness,
+ self.apply_contrast,
+ self.apply_saturation,
+ ]
+ distortions = np.random.permutation(functions)
+ for func in distortions:
+ img = func(img, img_gray)
+ sample['image'] = img
+ return sample
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__init__.py
new file mode 100644
index 000000000..9d14ee634
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__init__.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import trainer
+from .trainer import *
+
+from . import callbacks
+from .callbacks import *
+
+from . import env
+from .env import *
+
+__all__ = trainer.__all__ \
+ + callbacks.__all__ \
+ + env.__all__
+
+from . import tracker
+from .tracker import *
+__all__ = __all__ + tracker.__all__
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..3720322a3
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/callbacks.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/callbacks.cpython-37.pyc
new file mode 100644
index 000000000..fa174b4c0
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/callbacks.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/env.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/env.cpython-37.pyc
new file mode 100644
index 000000000..c8937c83a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/env.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/export_utils.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/export_utils.cpython-37.pyc
new file mode 100644
index 000000000..0ec950e26
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/export_utils.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/tracker.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/tracker.cpython-37.pyc
new file mode 100644
index 000000000..179fd496d
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/tracker.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/trainer.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/trainer.cpython-37.pyc
new file mode 100644
index 000000000..e3294bdaa
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/__pycache__/trainer.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/callbacks.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/callbacks.py
new file mode 100644
index 000000000..df42a687c
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/callbacks.py
@@ -0,0 +1,335 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+import datetime
+import six
+import copy
+import json
+
+import paddle
+import paddle.distributed as dist
+
+from ppdet.utils.checkpoint import save_model
+from ppdet.metrics import get_infer_results
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger('ppdet.engine')
+
+__all__ = ['Callback', 'ComposeCallback', 'LogPrinter', 'Checkpointer', 'VisualDLWriter', 'SniperProposalsGenerator']
+
+
+class Callback(object):
+ def __init__(self, model):
+ self.model = model
+
+ def on_step_begin(self, status):
+ pass
+
+ def on_step_end(self, status):
+ pass
+
+ def on_epoch_begin(self, status):
+ pass
+
+ def on_epoch_end(self, status):
+ pass
+
+ def on_train_begin(self, status):
+ pass
+
+ def on_train_end(self, status):
+ pass
+
+
+class ComposeCallback(object):
+ def __init__(self, callbacks):
+ callbacks = [c for c in list(callbacks) if c is not None]
+ for c in callbacks:
+            assert isinstance(
+                c, Callback), "callback should be an instance of a Callback subclass"
+ self._callbacks = callbacks
+
+ def on_step_begin(self, status):
+ for c in self._callbacks:
+ c.on_step_begin(status)
+
+ def on_step_end(self, status):
+ for c in self._callbacks:
+ c.on_step_end(status)
+
+ def on_epoch_begin(self, status):
+ for c in self._callbacks:
+ c.on_epoch_begin(status)
+
+ def on_epoch_end(self, status):
+ for c in self._callbacks:
+ c.on_epoch_end(status)
+
+ def on_train_begin(self, status):
+ for c in self._callbacks:
+ c.on_train_begin(status)
+
+ def on_train_end(self, status):
+ for c in self._callbacks:
+ c.on_train_end(status)
+
+
+class LogPrinter(Callback):
+ def __init__(self, model):
+ super(LogPrinter, self).__init__(model)
+
+ def on_step_end(self, status):
+ if dist.get_world_size() < 2 or dist.get_rank() == 0:
+ mode = status['mode']
+ if mode == 'train':
+ epoch_id = status['epoch_id']
+ step_id = status['step_id']
+ steps_per_epoch = status['steps_per_epoch']
+                training_status = status['training_status']
+ batch_time = status['batch_time']
+ data_time = status['data_time']
+
+                epochs = self.model.cfg.epoch
+ batch_size = self.model.cfg['{}Reader'.format(mode.capitalize(
+ ))]['batch_size']
+
+                logs = training_status.log()
+ space_fmt = ':' + str(len(str(steps_per_epoch))) + 'd'
+ if step_id % self.model.cfg.log_iter == 0:
+                    eta_steps = (epochs - epoch_id) * steps_per_epoch - step_id
+ eta_sec = eta_steps * batch_time.global_avg
+ eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
+ ips = float(batch_size) / batch_time.avg
+ fmt = ' '.join([
+ 'Epoch: [{}]',
+ '[{' + space_fmt + '}/{}]',
+ 'learning_rate: {lr:.6f}',
+ '{meters}',
+ 'eta: {eta}',
+ 'batch_cost: {btime}',
+ 'data_cost: {dtime}',
+ 'ips: {ips:.4f} images/s',
+ ])
+ fmt = fmt.format(
+ epoch_id,
+ step_id,
+ steps_per_epoch,
+ lr=status['learning_rate'],
+ meters=logs,
+ eta=eta_str,
+ btime=str(batch_time),
+ dtime=str(data_time),
+ ips=ips)
+ logger.info(fmt)
+ if mode == 'eval':
+ step_id = status['step_id']
+ if step_id % 100 == 0:
+ logger.info("Eval iter: {}".format(step_id))
+
+ def on_epoch_end(self, status):
+ if dist.get_world_size() < 2 or dist.get_rank() == 0:
+ mode = status['mode']
+ if mode == 'eval':
+ sample_num = status['sample_num']
+ cost_time = status['cost_time']
+                logger.info('Total sample number: {}, average FPS: {}'.format(
+ sample_num, sample_num / cost_time))
+
+
+class Checkpointer(Callback):
+ def __init__(self, model):
+ super(Checkpointer, self).__init__(model)
+ cfg = self.model.cfg
+ self.best_ap = 0.
+ self.save_dir = os.path.join(self.model.cfg.save_dir,
+ self.model.cfg.filename)
+ if hasattr(self.model.model, 'student_model'):
+ self.weight = self.model.model.student_model
+ else:
+ self.weight = self.model.model
+
+ def on_epoch_end(self, status):
+        # Checkpointer is only run during training
+ mode = status['mode']
+ epoch_id = status['epoch_id']
+ weight = None
+ save_name = None
+ if dist.get_world_size() < 2 or dist.get_rank() == 0:
+ if mode == 'train':
+ end_epoch = self.model.cfg.epoch
+                if (epoch_id + 1) % self.model.cfg.snapshot_epoch == 0 \
+                        or epoch_id == end_epoch - 1:
+                    save_name = str(epoch_id) \
+                        if epoch_id != end_epoch - 1 else "model_final"
+ weight = self.weight
+ elif mode == 'eval':
+ if 'save_best_model' in status and status['save_best_model']:
+ for metric in self.model._metrics:
+ map_res = metric.get_results()
+ if 'bbox' in map_res:
+ key = 'bbox'
+ elif 'keypoint' in map_res:
+ key = 'keypoint'
+ else:
+ key = 'mask'
+ if key not in map_res:
+ logger.warning("Evaluation results empty, this may be due to " \
+ "training iterations being too few or not " \
+ "loading the correct weights.")
+ return
+ if map_res[key][0] > self.best_ap:
+ self.best_ap = map_res[key][0]
+ save_name = 'best_model'
+ weight = self.weight
+ logger.info("Best test {} ap is {:0.3f}.".format(
+ key, self.best_ap))
+ if weight:
+ save_model(weight, self.model.optimizer, self.save_dir,
+ save_name, epoch_id + 1)
+
+
+class WiferFaceEval(Callback):
+ def __init__(self, model):
+ super(WiferFaceEval, self).__init__(model)
+
+ def on_epoch_begin(self, status):
+ assert self.model.mode == 'eval', \
+ "WiferFaceEval can only be set during evaluation"
+ for metric in self.model._metrics:
+ metric.update(self.model.model)
+ sys.exit()
+
+
+class VisualDLWriter(Callback):
+ """
+ Use VisualDL to log data or image
+ """
+
+ def __init__(self, model):
+ super(VisualDLWriter, self).__init__(model)
+
+ assert six.PY3, "VisualDL requires Python >= 3.5"
+ try:
+ from visualdl import LogWriter
+ except Exception as e:
+            logger.error('visualdl not found, please install visualdl. '
+ 'for example: `pip install visualdl`.')
+ raise e
+ self.vdl_writer = LogWriter(
+ model.cfg.get('vdl_log_dir', 'vdl_log_dir/scalar'))
+ self.vdl_loss_step = 0
+ self.vdl_mAP_step = 0
+ self.vdl_image_step = 0
+ self.vdl_image_frame = 0
+
+ def on_step_end(self, status):
+ mode = status['mode']
+ if dist.get_world_size() < 2 or dist.get_rank() == 0:
+ if mode == 'train':
+                training_status = status['training_status']
+                for loss_name, loss_value in training_status.get().items():
+ self.vdl_writer.add_scalar(loss_name, loss_value,
+ self.vdl_loss_step)
+ self.vdl_loss_step += 1
+ elif mode == 'test':
+ ori_image = status['original_image']
+ result_image = status['result_image']
+ self.vdl_writer.add_image(
+ "original/frame_{}".format(self.vdl_image_frame), ori_image,
+ self.vdl_image_step)
+ self.vdl_writer.add_image(
+ "result/frame_{}".format(self.vdl_image_frame),
+ result_image, self.vdl_image_step)
+ self.vdl_image_step += 1
+ # each frame can display ten pictures at most.
+ if self.vdl_image_step % 10 == 0:
+ self.vdl_image_step = 0
+ self.vdl_image_frame += 1
+
+ def on_epoch_end(self, status):
+ mode = status['mode']
+ if dist.get_world_size() < 2 or dist.get_rank() == 0:
+ if mode == 'eval':
+ for metric in self.model._metrics:
+ for key, map_value in metric.get_results().items():
+ self.vdl_writer.add_scalar("{}-mAP".format(key),
+ map_value[0],
+ self.vdl_mAP_step)
+ self.vdl_mAP_step += 1
+
+
+class SniperProposalsGenerator(Callback):
+ def __init__(self, model):
+ super(SniperProposalsGenerator, self).__init__(model)
+ ori_dataset = self.model.dataset
+ self.dataset = self._create_new_dataset(ori_dataset)
+ self.loader = self.model.loader
+ self.cfg = self.model.cfg
+ self.infer_model = self.model.model
+
+ def _create_new_dataset(self, ori_dataset):
+ dataset = copy.deepcopy(ori_dataset)
+ # init anno_cropper
+ dataset.init_anno_cropper()
+ # generate infer roidbs
+ ori_roidbs = dataset.get_ori_roidbs()
+ roidbs = dataset.anno_cropper.crop_infer_anno_records(ori_roidbs)
+ # set new roidbs
+ dataset.set_roidbs(roidbs)
+
+ return dataset
+
+ def _eval_with_loader(self, loader):
+ results = []
+ with paddle.no_grad():
+ self.infer_model.eval()
+ for step_id, data in enumerate(loader):
+ outs = self.infer_model(data)
+ for key in ['im_shape', 'scale_factor', 'im_id']:
+ outs[key] = data[key]
+ for key, value in outs.items():
+ if hasattr(value, 'numpy'):
+ outs[key] = value.numpy()
+
+ results.append(outs)
+
+ return results
+
+ def on_train_end(self, status):
+ self.loader.dataset = self.dataset
+ results = self._eval_with_loader(self.loader)
+ results = self.dataset.anno_cropper.aggregate_chips_detections(results)
+ # sniper
+ proposals = []
+ clsid2catid = {v: k for k, v in self.dataset.catid2clsid.items()}
+ for outs in results:
+ batch_res = get_infer_results(outs, clsid2catid)
+ start = 0
+ for i, im_id in enumerate(outs['im_id']):
+ bbox_num = outs['bbox_num']
+ end = start + bbox_num[i]
+ bbox_res = batch_res['bbox'][start:end] \
+ if 'bbox' in batch_res else None
+ if bbox_res:
+ proposals += bbox_res
+ logger.info("save proposals in {}".format(self.cfg.proposals_path))
+ with open(self.cfg.proposals_path, 'w') as f:
+ json.dump(proposals, f)
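+
+# A minimal usage sketch (hypothetical callback; the Trainer wiring mirrors
+# ppdet/engine/trainer.py in this sample library):
+#
+#   class EpochTimer(Callback):
+#       def on_epoch_end(self, status):
+#           print('finished epoch', status['epoch_id'])
+#
+#   trainer = Trainer(cfg, mode='train')
+#   trainer.register_callbacks([EpochTimer(trainer)])
+#   trainer.train()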
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/env.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/env.py
new file mode 100644
index 000000000..0a896571d
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/env.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import random
+import numpy as np
+
+import paddle
+from paddle.distributed import fleet
+
+__all__ = ['init_parallel_env', 'set_random_seed', 'init_fleet_env']
+
+
+def init_fleet_env(find_unused_parameters=False):
+ strategy = fleet.DistributedStrategy()
+ strategy.find_unused_parameters = find_unused_parameters
+ fleet.init(is_collective=True, strategy=strategy)
+
+
+def init_parallel_env():
+ env = os.environ
+ dist = 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env
+ if dist:
+ trainer_id = int(env['PADDLE_TRAINER_ID'])
+ local_seed = (99 + trainer_id)
+ random.seed(local_seed)
+ np.random.seed(local_seed)
+
+ paddle.distributed.init_parallel_env()
+
+
+def set_random_seed(seed):
+ paddle.seed(seed)
+ random.seed(seed)
+ np.random.seed(seed)
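+
+# Typical usage in a training entry point (a sketch, not prescribed by this
+# module):
+#
+#   set_random_seed(42)    # seed paddle/random/numpy for reproducibility
+#   init_parallel_env()    # per-trainer seeding + collective env init
+#   # or, when launching with fleet:
+#   # init_fleet_env(find_unused_parameters=True)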
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/export_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/export_utils.py
new file mode 100644
index 000000000..e1cf64638
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/export_utils.py
@@ -0,0 +1,175 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import yaml
+from collections import OrderedDict
+
+import paddle
+from ppdet.data.source.category import get_categories
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger('ppdet.engine')
+
+# Minimum TensorRT subgraph size for each supported architecture
+TRT_MIN_SUBGRAPH = {
+ 'YOLO': 3,
+ 'SSD': 60,
+ 'RCNN': 40,
+ 'RetinaNet': 40,
+ 'S2ANet': 80,
+ 'EfficientDet': 40,
+ 'Face': 3,
+ 'TTFNet': 60,
+ 'FCOS': 16,
+ 'SOLOv2': 60,
+ 'HigherHRNet': 3,
+ 'HRNet': 3,
+ 'DeepSORT': 3,
+ 'JDE': 10,
+ 'FairMOT': 5,
+ 'GFL': 16,
+ 'PicoDet': 3,
+ 'CenterNet': 5,
+}
+
+KEYPOINT_ARCH = ['HigherHRNet', 'TopDownHRNet']
+MOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT']
+
+
+def _prune_input_spec(input_spec, program, targets):
+    # try to prune the static program to figure out the pruned input spec,
+    # so we perform the following operations in static mode
+ paddle.enable_static()
+ pruned_input_spec = [{}]
+ program = program.clone()
+ program = program._prune(targets=targets)
+ global_block = program.global_block()
+ for name, spec in input_spec[0].items():
+        try:
+            # global_block.var() raises if `name` was pruned from the program,
+            # so only inputs that survive pruning are kept in the spec
+            v = global_block.var(name)
+            pruned_input_spec[0][name] = spec
+        except Exception:
+            pass
+ paddle.disable_static()
+ return pruned_input_spec
+
+
+def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
+ preprocess_list = []
+
+ anno_file = dataset_cfg.get_anno()
+
+ clsid2catid, catid2name = get_categories(metric, anno_file, arch)
+
+ label_list = [str(cat) for cat in catid2name.values()]
+
+ fuse_normalize = reader_cfg.get('fuse_normalize', False)
+ sample_transforms = reader_cfg['sample_transforms']
+    # the first sample transform (image decoding) is not needed at deploy time
+    for st in sample_transforms[1:]:
+ for key, value in st.items():
+ p = {'type': key}
+ if key == 'Resize':
+ if int(image_shape[1]) != -1:
+ value['target_size'] = image_shape[1:]
+ if fuse_normalize and key == 'NormalizeImage':
+ continue
+ p.update(value)
+ preprocess_list.append(p)
+ batch_transforms = reader_cfg.get('batch_transforms', None)
+ if batch_transforms:
+ for bt in batch_transforms:
+ for key, value in bt.items():
+                # for deploy/infer, use PadStride(stride) instead of PadBatch(pad_to_stride)
+ if key == 'PadBatch':
+ preprocess_list.append({
+ 'type': 'PadStride',
+ 'stride': value['pad_to_stride']
+ })
+ break
+
+ return preprocess_list, label_list
+
+
+def _parse_tracker(tracker_cfg):
+    # copy the tracker config into a plain dict
+    return dict(tracker_cfg)
+
+
+def _dump_infer_config(config, path, image_shape, model):
+ arch_state = False
+ from ppdet.core.config.yaml_helpers import setup_orderdict
+ setup_orderdict()
+    use_dynamic_shape = image_shape[2] == -1
+ infer_cfg = OrderedDict({
+ 'mode': 'fluid',
+ 'draw_threshold': 0.5,
+ 'metric': config['metric'],
+ 'use_dynamic_shape': use_dynamic_shape
+ })
+ infer_arch = config['architecture']
+
+ if infer_arch in MOT_ARCH:
+ if infer_arch == 'DeepSORT':
+ tracker_cfg = config['DeepSORTTracker']
+ else:
+ tracker_cfg = config['JDETracker']
+ infer_cfg['tracker'] = _parse_tracker(tracker_cfg)
+
+ for arch, min_subgraph_size in TRT_MIN_SUBGRAPH.items():
+ if arch in infer_arch:
+ infer_cfg['arch'] = arch
+ infer_cfg['min_subgraph_size'] = min_subgraph_size
+ arch_state = True
+ break
+ if not arch_state:
+ logger.error(
+            'Architecture: {} is not supported for model export yet.\n'.
+ format(infer_arch) +
+ 'Please set TRT_MIN_SUBGRAPH in ppdet/engine/export_utils.py')
+ os._exit(0)
+ if 'mask_head' in config[config['architecture']] and config[config[
+ 'architecture']]['mask_head']:
+ infer_cfg['mask'] = True
+ label_arch = 'detection_arch'
+ if infer_arch in KEYPOINT_ARCH:
+ label_arch = 'keypoint_arch'
+
+ if infer_arch in MOT_ARCH:
+ label_arch = 'mot_arch'
+ reader_cfg = config['TestMOTReader']
+ dataset_cfg = config['TestMOTDataset']
+ else:
+ reader_cfg = config['TestReader']
+ dataset_cfg = config['TestDataset']
+
+ infer_cfg['Preprocess'], infer_cfg['label_list'] = _parse_reader(
+ reader_cfg, dataset_cfg, config['metric'], label_arch, image_shape[1:])
+
+ if infer_arch == 'PicoDet':
+ infer_cfg['NMS'] = config['PicoHead']['nms']
+        # to speed up prediction, the NMS thresholds are adjusted here;
+        # they can still be changed later in infer_cfg.yml
+ config['PicoHead']['nms']["score_threshold"] = 0.3
+ config['PicoHead']['nms']["nms_threshold"] = 0.5
+ infer_cfg['fpn_stride'] = config['PicoHead']['fpn_stride']
+
+    with open(path, 'w') as f:
+        yaml.dump(infer_cfg, f)
+    logger.info("Export inference config file to {}".format(path))
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/tracker.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/tracker.py
new file mode 100644
index 000000000..75602cb64
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/tracker.py
@@ -0,0 +1,536 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import cv2
+import glob
+import paddle
+import numpy as np
+from collections import defaultdict
+
+from ppdet.core.workspace import create
+from ppdet.utils.checkpoint import load_weight, load_pretrain_weight
+from ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_box
+from ppdet.modeling.mot.utils import MOTTimer, load_det_results, write_mot_results, save_vis_results
+
+from ppdet.metrics import Metric, MOTMetric, KITTIMOTMetric
+from ppdet.metrics import MCMOTMetric
+import ppdet.utils.stats as stats
+
+from .callbacks import Callback, ComposeCallback
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = ['Tracker']
+
+
+class Tracker(object):
+ def __init__(self, cfg, mode='eval'):
+ self.cfg = cfg
+ assert mode.lower() in ['test', 'eval'], \
+ "mode should be 'test' or 'eval'"
+ self.mode = mode.lower()
+ self.optimizer = None
+
+ # build MOT data loader
+ self.dataset = cfg['{}MOTDataset'.format(self.mode.capitalize())]
+
+ # build model
+ self.model = create(cfg.architecture)
+
+ self.status = {}
+ self.start_epoch = 0
+
+ # initial default callbacks
+ self._init_callbacks()
+
+ # initial default metrics
+ self._init_metrics()
+ self._reset_metrics()
+
+ def _init_callbacks(self):
+ self._callbacks = []
+ self._compose_callback = None
+
+ def _init_metrics(self):
+ if self.mode in ['test']:
+ self._metrics = []
+ return
+
+ if self.cfg.metric == 'MOT':
+ self._metrics = [MOTMetric(), ]
+ elif self.cfg.metric == 'MCMOT':
+ self._metrics = [MCMOTMetric(self.cfg.num_classes), ]
+ elif self.cfg.metric == 'KITTI':
+ self._metrics = [KITTIMOTMetric(), ]
+ else:
+ logger.warning("Metric not support for metric type {}".format(
+ self.cfg.metric))
+ self._metrics = []
+
+ def _reset_metrics(self):
+ for metric in self._metrics:
+ metric.reset()
+
+ def register_callbacks(self, callbacks):
+        callbacks = [c for c in list(callbacks) if c is not None]
+        for c in callbacks:
+            assert isinstance(c, Callback), \
+                "callbacks should be instances of a subclass of Callback"
+ self._callbacks.extend(callbacks)
+ self._compose_callback = ComposeCallback(self._callbacks)
+
+ def register_metrics(self, metrics):
+ metrics = [m for m in list(metrics) if m is not None]
+ for m in metrics:
+ assert isinstance(m, Metric), \
+ "metrics shoule be instances of subclass of Metric"
+ self._metrics.extend(metrics)
+
+ def load_weights_jde(self, weights):
+ load_weight(self.model, weights, self.optimizer)
+
+ def load_weights_sde(self, det_weights, reid_weights):
+ if self.model.detector:
+ load_weight(self.model.detector, det_weights)
+ load_weight(self.model.reid, reid_weights)
+ else:
+ load_weight(self.model.reid, reid_weights, self.optimizer)
+
+ def _eval_seq_jde(self,
+ dataloader,
+ save_dir=None,
+ show_image=False,
+ frame_rate=30,
+ draw_threshold=0):
+ if save_dir:
+ if not os.path.exists(save_dir): os.makedirs(save_dir)
+ tracker = self.model.tracker
+ tracker.max_time_lost = int(frame_rate / 30.0 * tracker.track_buffer)
+
+ timer = MOTTimer()
+ frame_id = 0
+ self.status['mode'] = 'track'
+ self.model.eval()
+ results = defaultdict(list) # support single class and multi classes
+
+ for step_id, data in enumerate(dataloader):
+ self.status['step_id'] = step_id
+ if frame_id % 40 == 0:
+ logger.info('Processing frame {} ({:.2f} fps)'.format(
+ frame_id, 1. / max(1e-5, timer.average_time)))
+ # forward
+ timer.tic()
+ pred_dets, pred_embs = self.model(data)
+
+ pred_dets, pred_embs = pred_dets.numpy(), pred_embs.numpy()
+ online_targets_dict = self.model.tracker.update(pred_dets,
+ pred_embs)
+ online_tlwhs = defaultdict(list)
+ online_scores = defaultdict(list)
+ online_ids = defaultdict(list)
+ for cls_id in range(self.cfg.num_classes):
+ online_targets = online_targets_dict[cls_id]
+ for t in online_targets:
+ tlwh = t.tlwh
+ tid = t.track_id
+ tscore = t.score
+ if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
+ if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
+ 3] > tracker.vertical_ratio:
+ continue
+ online_tlwhs[cls_id].append(tlwh)
+ online_ids[cls_id].append(tid)
+ online_scores[cls_id].append(tscore)
+ # save results
+ results[cls_id].append(
+ (frame_id + 1, online_tlwhs[cls_id], online_scores[cls_id],
+ online_ids[cls_id]))
+
+ timer.toc()
+ save_vis_results(data, frame_id, online_ids, online_tlwhs,
+ online_scores, timer.average_time, show_image,
+ save_dir, self.cfg.num_classes)
+ frame_id += 1
+
+ return results, frame_id, timer.average_time, timer.calls
+
+ def _eval_seq_sde(self,
+ dataloader,
+ save_dir=None,
+ show_image=False,
+ frame_rate=30,
+ seq_name='',
+ scaled=False,
+ det_file='',
+ draw_threshold=0):
+ if save_dir:
+ if not os.path.exists(save_dir): os.makedirs(save_dir)
+        use_detector = bool(self.model.detector)
+
+ timer = MOTTimer()
+ results = defaultdict(list)
+ frame_id = 0
+ self.status['mode'] = 'track'
+ self.model.eval()
+ self.model.reid.eval()
+ if not use_detector:
+ dets_list = load_det_results(det_file, len(dataloader))
+ logger.info('Finish loading detection results file {}.'.format(
+ det_file))
+
+ for step_id, data in enumerate(dataloader):
+ self.status['step_id'] = step_id
+ if frame_id % 40 == 0:
+ logger.info('Processing frame {} ({:.2f} fps)'.format(
+ frame_id, 1. / max(1e-5, timer.average_time)))
+
+ ori_image = data['ori_image'] # [bs, H, W, 3]
+ ori_image_shape = data['ori_image'].shape[1:3]
+ # ori_image_shape: [H, W]
+
+ input_shape = data['image'].shape[2:]
+ # input_shape: [h, w], before data transforms, set in model config
+
+ im_shape = data['im_shape'][0].numpy()
+ # im_shape: [new_h, new_w], after data transforms
+ scale_factor = data['scale_factor'][0].numpy()
+
+ empty_detections = False
+            # when there are no detected bboxes, the reid model is not run,
+            # and if visualizing, the original image is used instead
+
+ # forward
+ timer.tic()
+ if not use_detector:
+ dets = dets_list[frame_id]
+ bbox_tlwh = np.array(dets['bbox'], dtype='float32')
+ if bbox_tlwh.shape[0] > 0:
+ # detector outputs: pred_cls_ids, pred_scores, pred_bboxes
+ pred_cls_ids = np.array(dets['cls_id'], dtype='float32')
+ pred_scores = np.array(dets['score'], dtype='float32')
+ pred_bboxes = np.concatenate(
+ (bbox_tlwh[:, 0:2],
+ bbox_tlwh[:, 2:4] + bbox_tlwh[:, 0:2]),
+ axis=1)
+ else:
+ logger.warning(
+                    'Frame {} has no object, try to modify the score threshold.'.
+ format(frame_id))
+ empty_detections = True
+ else:
+ outs = self.model.detector(data)
+ outs['bbox'] = outs['bbox'].numpy()
+ outs['bbox_num'] = outs['bbox_num'].numpy()
+
+            if outs['bbox_num'] > 0 and not empty_detections:
+ # detector outputs: pred_cls_ids, pred_scores, pred_bboxes
+ pred_cls_ids = outs['bbox'][:, 0:1]
+ pred_scores = outs['bbox'][:, 1:2]
+ if not scaled:
+ # Note: scaled=False only in JDE YOLOv3 or other detectors
+ # with LetterBoxResize and JDEBBoxPostProcess.
+ #
+ # 'scaled' means whether the coords after detector outputs
+ # have been scaled back to the original image, set True
+ # in general detector, set False in JDE YOLOv3.
+ pred_bboxes = scale_coords(outs['bbox'][:, 2:],
+ input_shape, im_shape,
+ scale_factor)
+ else:
+ pred_bboxes = outs['bbox'][:, 2:]
+ else:
+ logger.warning(
+                    'Frame {} has no detected object, try to modify the score threshold.'.
+ format(frame_id))
+ empty_detections = True
+
+ if not empty_detections:
+ pred_xyxys, keep_idx = clip_box(pred_bboxes, ori_image_shape)
+ if len(keep_idx[0]) == 0:
+ logger.warning(
+                        'Frame {} has no detected object left after clip_box.'.
+ format(frame_id))
+ empty_detections = True
+
+ if empty_detections:
+ timer.toc()
+ # if visualize, use original image instead
+ online_ids, online_tlwhs, online_scores = None, None, None
+ save_vis_results(data, frame_id, online_ids, online_tlwhs,
+ online_scores, timer.average_time, show_image,
+ save_dir, self.cfg.num_classes)
+ frame_id += 1
+                # thus the reid model will not be run
+ continue
+
+ pred_scores = pred_scores[keep_idx[0]]
+ pred_cls_ids = pred_cls_ids[keep_idx[0]]
+ pred_tlwhs = np.concatenate(
+ (pred_xyxys[:, 0:2],
+ pred_xyxys[:, 2:4] - pred_xyxys[:, 0:2] + 1),
+ axis=1)
+ pred_dets = np.concatenate(
+ (pred_tlwhs, pred_scores, pred_cls_ids), axis=1)
+
+ tracker = self.model.tracker
+ crops = get_crops(
+ pred_xyxys,
+ ori_image,
+ w=tracker.input_size[0],
+ h=tracker.input_size[1])
+ crops = paddle.to_tensor(crops)
+
+ data.update({'crops': crops})
+ pred_embs = self.model(data).numpy()
+
+ tracker.predict()
+ online_targets = tracker.update(pred_dets, pred_embs)
+
+ online_tlwhs, online_scores, online_ids = [], [], []
+ for t in online_targets:
+ if not t.is_confirmed() or t.time_since_update > 1:
+ continue
+ tlwh = t.to_tlwh()
+ tscore = t.score
+ tid = t.track_id
+ if tscore < draw_threshold: continue
+ if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
+ if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
+ 3] > tracker.vertical_ratio:
+ continue
+ online_tlwhs.append(tlwh)
+ online_scores.append(tscore)
+ online_ids.append(tid)
+ timer.toc()
+
+ # save results
+ results[0].append(
+ (frame_id + 1, online_tlwhs, online_scores, online_ids))
+ save_vis_results(data, frame_id, online_ids, online_tlwhs,
+ online_scores, timer.average_time, show_image,
+ save_dir, self.cfg.num_classes)
+ frame_id += 1
+
+ return results, frame_id, timer.average_time, timer.calls
+
+ def mot_evaluate(self,
+ data_root,
+ seqs,
+ output_dir,
+ data_type='mot',
+ model_type='JDE',
+ save_images=False,
+ save_videos=False,
+ show_image=False,
+ scaled=False,
+ det_results_dir=''):
+ if not os.path.exists(output_dir): os.makedirs(output_dir)
+ result_root = os.path.join(output_dir, 'mot_results')
+ if not os.path.exists(result_root): os.makedirs(result_root)
+ assert data_type in ['mot', 'mcmot', 'kitti'], \
+ "data_type should be 'mot', 'mcmot' or 'kitti'"
+ assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
+ "model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"
+
+ # run tracking
+ n_frame = 0
+ timer_avgs, timer_calls = [], []
+ for seq in seqs:
+ infer_dir = os.path.join(data_root, seq)
+ if not os.path.exists(infer_dir) or not os.path.isdir(infer_dir):
+ logger.warning("Seq {} error, {} has no images.".format(
+ seq, infer_dir))
+ continue
+ if os.path.exists(os.path.join(infer_dir, 'img1')):
+ infer_dir = os.path.join(infer_dir, 'img1')
+
+ frame_rate = 30
+ seqinfo = os.path.join(data_root, seq, 'seqinfo.ini')
+ if os.path.exists(seqinfo):
+                with open(seqinfo) as f:
+                    meta_info = f.read()
+ frame_rate = int(meta_info[meta_info.find('frameRate') + 10:
+ meta_info.find('\nseqLength')])
+
+ save_dir = os.path.join(output_dir, 'mot_outputs',
+ seq) if save_images or save_videos else None
+ logger.info('start seq: {}'.format(seq))
+
+ self.dataset.set_images(self.get_infer_images(infer_dir))
+ dataloader = create('EvalMOTReader')(self.dataset, 0)
+
+ result_filename = os.path.join(result_root, '{}.txt'.format(seq))
+
+ with paddle.no_grad():
+ if model_type in ['JDE', 'FairMOT']:
+ results, nf, ta, tc = self._eval_seq_jde(
+ dataloader,
+ save_dir=save_dir,
+ show_image=show_image,
+ frame_rate=frame_rate)
+ elif model_type in ['DeepSORT']:
+ results, nf, ta, tc = self._eval_seq_sde(
+ dataloader,
+ save_dir=save_dir,
+ show_image=show_image,
+ frame_rate=frame_rate,
+ seq_name=seq,
+ scaled=scaled,
+ det_file=os.path.join(det_results_dir,
+ '{}.txt'.format(seq)))
+ else:
+ raise ValueError(model_type)
+
+ write_mot_results(result_filename, results, data_type,
+ self.cfg.num_classes)
+ n_frame += nf
+ timer_avgs.append(ta)
+ timer_calls.append(tc)
+
+ if save_videos:
+ output_video_path = os.path.join(save_dir, '..',
+ '{}_vis.mp4'.format(seq))
+ cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
+ save_dir, output_video_path)
+ os.system(cmd_str)
+ logger.info('Save video in {}.'.format(output_video_path))
+
+ logger.info('Evaluate seq: {}'.format(seq))
+ # update metrics
+ for metric in self._metrics:
+ metric.update(data_root, seq, data_type, result_root,
+ result_filename)
+
+ timer_avgs = np.asarray(timer_avgs)
+ timer_calls = np.asarray(timer_calls)
+ all_time = np.dot(timer_avgs, timer_calls)
+ avg_time = all_time / np.sum(timer_calls)
+ logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
+ all_time, 1.0 / avg_time))
+
+ # accumulate metric to log out
+ for metric in self._metrics:
+ metric.accumulate()
+ metric.log()
+        # reset metric states, as metrics may be evaluated multiple times
+ self._reset_metrics()
+
+ def get_infer_images(self, infer_dir):
+        assert infer_dir is not None and os.path.isdir(infer_dir), \
+            "infer_dir {} is not a directory".format(infer_dir)
+        images = set()
+ exts = ['jpg', 'jpeg', 'png', 'bmp']
+ exts += [ext.upper() for ext in exts]
+ for ext in exts:
+ images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
+ images = list(images)
+ images.sort()
+ assert len(images) > 0, "no image found in {}".format(infer_dir)
+ logger.info("Found {} inference images in total.".format(len(images)))
+ return images
+
+ def mot_predict_seq(self,
+ video_file,
+ frame_rate,
+ image_dir,
+ output_dir,
+ data_type='mot',
+ model_type='JDE',
+ save_images=False,
+ save_videos=True,
+ show_image=False,
+ scaled=False,
+ det_results_dir='',
+ draw_threshold=0.5):
+ assert video_file is not None or image_dir is not None, \
+ "--video_file or --image_dir should be set."
+ assert video_file is None or os.path.isfile(video_file), \
+ "{} is not a file".format(video_file)
+ assert image_dir is None or os.path.isdir(image_dir), \
+ "{} is not a directory".format(image_dir)
+
+ if not os.path.exists(output_dir): os.makedirs(output_dir)
+ result_root = os.path.join(output_dir, 'mot_results')
+ if not os.path.exists(result_root): os.makedirs(result_root)
+ assert data_type in ['mot', 'mcmot', 'kitti'], \
+ "data_type should be 'mot', 'mcmot' or 'kitti'"
+ assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
+ "model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"
+
+ # run tracking
+ if video_file:
+ seq = video_file.split('/')[-1].split('.')[0]
+ self.dataset.set_video(video_file, frame_rate)
+ logger.info('Starting tracking video {}'.format(video_file))
+ elif image_dir:
+ seq = image_dir.split('/')[-1].split('.')[0]
+ if os.path.exists(os.path.join(image_dir, 'img1')):
+ image_dir = os.path.join(image_dir, 'img1')
+ images = [
+ '{}/{}'.format(image_dir, x) for x in os.listdir(image_dir)
+ ]
+ images.sort()
+ self.dataset.set_images(images)
+ logger.info('Starting tracking folder {}, found {} images'.format(
+ image_dir, len(images)))
+ else:
+ raise ValueError('--video_file or --image_dir should be set.')
+
+ save_dir = os.path.join(output_dir, 'mot_outputs',
+ seq) if save_images or save_videos else None
+
+ dataloader = create('TestMOTReader')(self.dataset, 0)
+ result_filename = os.path.join(result_root, '{}.txt'.format(seq))
+ if frame_rate == -1:
+ frame_rate = self.dataset.frame_rate
+
+ with paddle.no_grad():
+ if model_type in ['JDE', 'FairMOT']:
+ results, nf, ta, tc = self._eval_seq_jde(
+ dataloader,
+ save_dir=save_dir,
+ show_image=show_image,
+ frame_rate=frame_rate,
+ draw_threshold=draw_threshold)
+ elif model_type in ['DeepSORT']:
+ results, nf, ta, tc = self._eval_seq_sde(
+ dataloader,
+ save_dir=save_dir,
+ show_image=show_image,
+ frame_rate=frame_rate,
+ seq_name=seq,
+ scaled=scaled,
+ det_file=os.path.join(det_results_dir,
+ '{}.txt'.format(seq)),
+ draw_threshold=draw_threshold)
+ else:
+ raise ValueError(model_type)
+
+ if save_videos:
+ output_video_path = os.path.join(save_dir, '..',
+ '{}_vis.mp4'.format(seq))
+ cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
+ save_dir, output_video_path)
+ os.system(cmd_str)
+ logger.info('Save video in {}'.format(output_video_path))
+
+ write_mot_results(result_filename, results, data_type,
+ self.cfg.num_classes)
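+
+# A minimal usage sketch (hypothetical config path; load_config comes from
+# ppdet.core.workspace):
+#
+#   from ppdet.core.workspace import load_config
+#   cfg = load_config('configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml')
+#   tracker = Tracker(cfg, mode='eval')
+#   tracker.load_weights_jde('output/fairmot_dla34_30e_1088x608/model_final')
+#   tracker.mot_evaluate(data_root='dataset/mot/MOT16/images/train',
+#                        seqs=['MOT16-02'], output_dir='output')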
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/trainer.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/trainer.py
new file mode 100644
index 000000000..dc739ff62
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/engine/trainer.py
@@ -0,0 +1,715 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+import copy
+import time
+
+import numpy as np
+from PIL import Image, ImageOps
+
+import paddle
+import paddle.distributed as dist
+from paddle.distributed import fleet
+from paddle import amp
+from paddle.static import InputSpec
+from ppdet.optimizer import ModelEMA
+
+from ppdet.core.workspace import create
+from ppdet.utils.checkpoint import load_weight, load_pretrain_weight
+from ppdet.utils.visualizer import visualize_results, save_result
+from ppdet.metrics import Metric, COCOMetric, VOCMetric, WiderFaceMetric, get_infer_results, KeyPointTopDownCOCOEval, KeyPointTopDownMPIIEval
+from ppdet.metrics import RBoxMetric, JDEDetMetric, SNIPERCOCOMetric
+from ppdet.data.source.sniper_coco import SniperCOCODataSet
+from ppdet.data.source.category import get_categories
+import ppdet.utils.stats as stats
+from ppdet.utils import profiler
+
+from .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer, WiferFaceEval, VisualDLWriter, SniperProposalsGenerator
+from .export_utils import _dump_infer_config, _prune_input_spec
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger('ppdet.engine')
+
+__all__ = ['Trainer']
+
+MOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT']
+
+
+class Trainer(object):
+ def __init__(self, cfg, mode='train'):
+ self.cfg = cfg
+ assert mode.lower() in ['train', 'eval', 'test'], \
+ "mode should be 'train', 'eval' or 'test'"
+ self.mode = mode.lower()
+ self.optimizer = None
+ self.is_loaded_weights = False
+
+ # build data loader
+ if cfg.architecture in MOT_ARCH and self.mode in ['eval', 'test']:
+ self.dataset = cfg['{}MOTDataset'.format(self.mode.capitalize())]
+ else:
+ self.dataset = cfg['{}Dataset'.format(self.mode.capitalize())]
+
+ if cfg.architecture == 'DeepSORT' and self.mode == 'train':
+            logger.error('DeepSORT does not need to be trained on the MOT dataset.')
+ sys.exit(1)
+
+ if self.mode == 'train':
+ self.loader = create('{}Reader'.format(self.mode.capitalize()))(
+ self.dataset, cfg.worker_num)
+
+ if cfg.architecture == 'JDE' and self.mode == 'train':
+ cfg['JDEEmbeddingHead'][
+ 'num_identities'] = self.dataset.num_identities_dict[0]
+            # JDE only supports single-class MOT for now.
+
+ if cfg.architecture == 'FairMOT' and self.mode == 'train':
+ cfg['FairMOTEmbeddingHead'][
+ 'num_identities_dict'] = self.dataset.num_identities_dict
+            # FairMOT supports both single-class and multi-class MOT.
+
+ # build model
+ if 'model' not in self.cfg:
+ self.model = create(cfg.architecture)
+ else:
+ self.model = self.cfg.model
+ self.is_loaded_weights = True
+
+        # normalize params for deploy
+ self.model.load_meanstd(cfg['TestReader']['sample_transforms'])
+
+ self.use_ema = ('use_ema' in cfg and cfg['use_ema'])
+ if self.use_ema:
+ ema_decay = self.cfg.get('ema_decay', 0.9998)
+ cycle_epoch = self.cfg.get('cycle_epoch', -1)
+ self.ema = ModelEMA(
+ self.model,
+ decay=ema_decay,
+ use_thres_step=True,
+ cycle_epoch=cycle_epoch)
+
+        # EvalDataset is built with a BatchSampler to evaluate on a single device
+ # TODO: multi-device evaluate
+ if self.mode == 'eval':
+ self._eval_batch_sampler = paddle.io.BatchSampler(
+ self.dataset, batch_size=self.cfg.EvalReader['batch_size'])
+ self.loader = create('{}Reader'.format(self.mode.capitalize()))(
+ self.dataset, cfg.worker_num, self._eval_batch_sampler)
+        # TestDataset is built after the user sets images, so skip loader creation here
+
+ # build optimizer in train mode
+ if self.mode == 'train':
+ steps_per_epoch = len(self.loader)
+ self.lr = create('LearningRate')(steps_per_epoch)
+ self.optimizer = create('OptimizerBuilder')(self.lr, self.model)
+
+ if self.cfg.get('unstructured_prune'):
+ self.pruner = create('UnstructuredPruner')(self.model,
+ steps_per_epoch)
+
+ self._nranks = dist.get_world_size()
+ self._local_rank = dist.get_rank()
+
+ self.status = {}
+
+ self.start_epoch = 0
+ self.end_epoch = 0 if 'epoch' not in cfg else cfg.epoch
+
+ # initial default callbacks
+ self._init_callbacks()
+
+ # initial default metrics
+ self._init_metrics()
+ self._reset_metrics()
+
+ def _init_callbacks(self):
+ if self.mode == 'train':
+ self._callbacks = [LogPrinter(self), Checkpointer(self)]
+ if self.cfg.get('use_vdl', False):
+ self._callbacks.append(VisualDLWriter(self))
+ if self.cfg.get('save_proposals', False):
+ self._callbacks.append(SniperProposalsGenerator(self))
+ self._compose_callback = ComposeCallback(self._callbacks)
+ elif self.mode == 'eval':
+ self._callbacks = [LogPrinter(self)]
+ if self.cfg.metric == 'WiderFace':
+ self._callbacks.append(WiferFaceEval(self))
+ self._compose_callback = ComposeCallback(self._callbacks)
+ elif self.mode == 'test' and self.cfg.get('use_vdl', False):
+ self._callbacks = [VisualDLWriter(self)]
+ self._compose_callback = ComposeCallback(self._callbacks)
+ else:
+ self._callbacks = []
+ self._compose_callback = None
+
+ def _init_metrics(self, validate=False):
+ if self.mode == 'test' or (self.mode == 'train' and not validate):
+ self._metrics = []
+ return
+ classwise = self.cfg['classwise'] if 'classwise' in self.cfg else False
+ if self.cfg.metric == 'COCO' or self.cfg.metric == "SNIPERCOCO":
+ # TODO: bias should be unified
+ bias = self.cfg['bias'] if 'bias' in self.cfg else 0
+ output_eval = self.cfg['output_eval'] \
+ if 'output_eval' in self.cfg else None
+ save_prediction_only = self.cfg.get('save_prediction_only', False)
+
+            # pass clsid2catid info to the metric instance to avoid loading
+            # the annotation file multiple times
+ clsid2catid = {v: k for k, v in self.dataset.catid2clsid.items()} \
+ if self.mode == 'eval' else None
+
+            # when doing validation during training, the annotation file should
+            # be taken from EvalReader instead of self.dataset (the TrainReader)
+ anno_file = self.dataset.get_anno()
+ dataset = self.dataset
+ if self.mode == 'train' and validate:
+ eval_dataset = self.cfg['EvalDataset']
+ eval_dataset.check_or_download_dataset()
+ anno_file = eval_dataset.get_anno()
+ dataset = eval_dataset
+
+ IouType = self.cfg['IouType'] if 'IouType' in self.cfg else 'bbox'
+ if self.cfg.metric == "COCO":
+ self._metrics = [
+ COCOMetric(
+ anno_file=anno_file,
+ clsid2catid=clsid2catid,
+ classwise=classwise,
+ output_eval=output_eval,
+ bias=bias,
+ IouType=IouType,
+ save_prediction_only=save_prediction_only)
+ ]
+ elif self.cfg.metric == "SNIPERCOCO": # sniper
+ self._metrics = [
+ SNIPERCOCOMetric(
+ anno_file=anno_file,
+ dataset=dataset,
+ clsid2catid=clsid2catid,
+ classwise=classwise,
+ output_eval=output_eval,
+ bias=bias,
+ IouType=IouType,
+ save_prediction_only=save_prediction_only)
+ ]
+ elif self.cfg.metric == 'RBOX':
+ # TODO: bias should be unified
+ bias = self.cfg['bias'] if 'bias' in self.cfg else 0
+ output_eval = self.cfg['output_eval'] \
+ if 'output_eval' in self.cfg else None
+ save_prediction_only = self.cfg.get('save_prediction_only', False)
+
+            # pass clsid2catid info to the metric instance to avoid loading
+            # the annotation file multiple times
+ clsid2catid = {v: k for k, v in self.dataset.catid2clsid.items()} \
+ if self.mode == 'eval' else None
+
+            # when doing validation during training, the annotation file should
+            # be taken from EvalReader instead of self.dataset (the TrainReader)
+ anno_file = self.dataset.get_anno()
+ if self.mode == 'train' and validate:
+ eval_dataset = self.cfg['EvalDataset']
+ eval_dataset.check_or_download_dataset()
+ anno_file = eval_dataset.get_anno()
+
+ self._metrics = [
+ RBoxMetric(
+ anno_file=anno_file,
+ clsid2catid=clsid2catid,
+ classwise=classwise,
+ output_eval=output_eval,
+ bias=bias,
+ save_prediction_only=save_prediction_only)
+ ]
+ elif self.cfg.metric == 'VOC':
+ self._metrics = [
+ VOCMetric(
+ label_list=self.dataset.get_label_list(),
+ class_num=self.cfg.num_classes,
+ map_type=self.cfg.map_type,
+ classwise=classwise)
+ ]
+ elif self.cfg.metric == 'WiderFace':
+ multi_scale = self.cfg.multi_scale_eval if 'multi_scale_eval' in self.cfg else True
+ self._metrics = [
+ WiderFaceMetric(
+ image_dir=os.path.join(self.dataset.dataset_dir,
+ self.dataset.image_dir),
+ anno_file=self.dataset.get_anno(),
+ multi_scale=multi_scale)
+ ]
+ elif self.cfg.metric == 'KeyPointTopDownCOCOEval':
+ eval_dataset = self.cfg['EvalDataset']
+ eval_dataset.check_or_download_dataset()
+ anno_file = eval_dataset.get_anno()
+ save_prediction_only = self.cfg.get('save_prediction_only', False)
+ self._metrics = [
+ KeyPointTopDownCOCOEval(
+ anno_file,
+ len(eval_dataset),
+ self.cfg.num_joints,
+ self.cfg.save_dir,
+ save_prediction_only=save_prediction_only)
+ ]
+ elif self.cfg.metric == 'KeyPointTopDownMPIIEval':
+ eval_dataset = self.cfg['EvalDataset']
+ eval_dataset.check_or_download_dataset()
+ anno_file = eval_dataset.get_anno()
+ save_prediction_only = self.cfg.get('save_prediction_only', False)
+ self._metrics = [
+ KeyPointTopDownMPIIEval(
+ anno_file,
+ len(eval_dataset),
+ self.cfg.num_joints,
+ self.cfg.save_dir,
+ save_prediction_only=save_prediction_only)
+ ]
+ elif self.cfg.metric == 'MOTDet':
+ self._metrics = [JDEDetMetric(), ]
+ else:
+ logger.warning("Metric not support for metric type {}".format(
+ self.cfg.metric))
+ self._metrics = []
+
+ def _reset_metrics(self):
+ for metric in self._metrics:
+ metric.reset()
+
+ def register_callbacks(self, callbacks):
+ callbacks = [c for c in list(callbacks) if c is not None]
+ for c in callbacks:
+ assert isinstance(c, Callback), \
+ "metrics shoule be instances of subclass of Metric"
+ self._callbacks.extend(callbacks)
+ self._compose_callback = ComposeCallback(self._callbacks)
+
+ def register_metrics(self, metrics):
+ metrics = [m for m in list(metrics) if m is not None]
+ for m in metrics:
+ assert isinstance(m, Metric), \
+ "metrics shoule be instances of subclass of Metric"
+ self._metrics.extend(metrics)
+
+ def load_weights(self, weights):
+ if self.is_loaded_weights:
+ return
+ self.start_epoch = 0
+ load_pretrain_weight(self.model, weights)
+ logger.debug("Load weights {} to start training".format(weights))
+
+ def load_weights_sde(self, det_weights, reid_weights):
+ if self.model.detector:
+ load_weight(self.model.detector, det_weights)
+ load_weight(self.model.reid, reid_weights)
+ else:
+ load_weight(self.model.reid, reid_weights)
+
+ def resume_weights(self, weights):
+ # support Distill resume weights
+ if hasattr(self.model, 'student_model'):
+ self.start_epoch = load_weight(self.model.student_model, weights,
+ self.optimizer)
+ else:
+ self.start_epoch = load_weight(self.model, weights, self.optimizer)
+ logger.debug("Resume weights of epoch {}".format(self.start_epoch))
+
+ def train(self, validate=False):
+ assert self.mode == 'train', "Model not in 'train' mode"
+ Init_mark = False
+
+ model = self.model
+ if self.cfg.get('fleet', False):
+ model = fleet.distributed_model(model)
+ self.optimizer = fleet.distributed_optimizer(self.optimizer)
+ elif self._nranks > 1:
+            find_unused_parameters = self.cfg.get('find_unused_parameters',
+                                                  False)
+ model = paddle.DataParallel(
+ self.model, find_unused_parameters=find_unused_parameters)
+
+ # initial fp16
+ if self.cfg.get('fp16', False):
+ scaler = amp.GradScaler(
+ enable=self.cfg.use_gpu, init_loss_scaling=1024)
+
+ self.status.update({
+ 'epoch_id': self.start_epoch,
+ 'step_id': 0,
+ 'steps_per_epoch': len(self.loader)
+ })
+
+ self.status['batch_time'] = stats.SmoothedValue(
+ self.cfg.log_iter, fmt='{avg:.4f}')
+ self.status['data_time'] = stats.SmoothedValue(
+ self.cfg.log_iter, fmt='{avg:.4f}')
+        self.status['training_status'] = stats.TrainingStats(self.cfg.log_iter)
+
+ if self.cfg.get('print_flops', False):
+ self._flops(self.loader)
+ profiler_options = self.cfg.get('profiler_options', None)
+
+ self._compose_callback.on_train_begin(self.status)
+
+ for epoch_id in range(self.start_epoch, self.cfg.epoch):
+ self.status['mode'] = 'train'
+ self.status['epoch_id'] = epoch_id
+ self._compose_callback.on_epoch_begin(self.status)
+ self.loader.dataset.set_epoch(epoch_id)
+ model.train()
+ iter_tic = time.time()
+ for step_id, data in enumerate(self.loader):
+ self.status['data_time'].update(time.time() - iter_tic)
+ self.status['step_id'] = step_id
+ profiler.add_profiler_step(profiler_options)
+ self._compose_callback.on_step_begin(self.status)
+ data['epoch_id'] = epoch_id
+
+ if self.cfg.get('fp16', False):
+ with amp.auto_cast(enable=self.cfg.use_gpu):
+ # model forward
+ outputs = model(data)
+ loss = outputs['loss']
+
+ # model backward
+ scaled_loss = scaler.scale(loss)
+ scaled_loss.backward()
+ # in dygraph mode, optimizer.minimize is equal to optimizer.step
+ scaler.minimize(self.optimizer, scaled_loss)
+ else:
+ # model forward
+ outputs = model(data)
+ loss = outputs['loss']
+ # model backward
+ loss.backward()
+ self.optimizer.step()
+ curr_lr = self.optimizer.get_lr()
+ self.lr.step()
+ if self.cfg.get('unstructured_prune'):
+ self.pruner.step()
+ self.optimizer.clear_grad()
+ self.status['learning_rate'] = curr_lr
+
+ if self._nranks < 2 or self._local_rank == 0:
+                self.status['training_status'].update(outputs)
+
+ self.status['batch_time'].update(time.time() - iter_tic)
+ self._compose_callback.on_step_end(self.status)
+ if self.use_ema:
+ self.ema.update(self.model)
+ iter_tic = time.time()
+
+ # apply ema weight on model
+ if self.use_ema:
+ weight = copy.deepcopy(self.model.state_dict())
+ self.model.set_dict(self.ema.apply())
+ if self.cfg.get('unstructured_prune'):
+ self.pruner.update_params()
+
+ self._compose_callback.on_epoch_end(self.status)
+
+ if validate and (self._nranks < 2 or self._local_rank == 0) \
+ and ((epoch_id + 1) % self.cfg.snapshot_epoch == 0 \
+ or epoch_id == self.end_epoch - 1):
+ if not hasattr(self, '_eval_loader'):
+ # build evaluation dataset and loader
+ self._eval_dataset = self.cfg.EvalDataset
+ self._eval_batch_sampler = \
+ paddle.io.BatchSampler(
+ self._eval_dataset,
+ batch_size=self.cfg.EvalReader['batch_size'])
+ self._eval_loader = create('EvalReader')(
+ self._eval_dataset,
+ self.cfg.worker_num,
+ batch_sampler=self._eval_batch_sampler)
+                # if validation during training is enabled, metrics should be
+                # re-initialized; Init_mark makes sure this only executes once
+                if validate and not Init_mark:
+ Init_mark = True
+ self._init_metrics(validate=validate)
+ self._reset_metrics()
+ with paddle.no_grad():
+ self.status['save_best_model'] = True
+ self._eval_with_loader(self._eval_loader)
+
+ # restore origin weight on model
+ if self.use_ema:
+ self.model.set_dict(weight)
+
+ self._compose_callback.on_train_end(self.status)
+
+ def _eval_with_loader(self, loader):
+ sample_num = 0
+ tic = time.time()
+ self._compose_callback.on_epoch_begin(self.status)
+ self.status['mode'] = 'eval'
+ self.model.eval()
+ if self.cfg.get('print_flops', False):
+ self._flops(loader)
+ for step_id, data in enumerate(loader):
+ self.status['step_id'] = step_id
+ self._compose_callback.on_step_begin(self.status)
+ # forward
+ outs = self.model(data)
+
+ # update metrics
+ for metric in self._metrics:
+ metric.update(data, outs)
+
+ sample_num += data['im_id'].numpy().shape[0]
+ self._compose_callback.on_step_end(self.status)
+
+ self.status['sample_num'] = sample_num
+ self.status['cost_time'] = time.time() - tic
+
+ # accumulate metric to log out
+ for metric in self._metrics:
+ metric.accumulate()
+ metric.log()
+ self._compose_callback.on_epoch_end(self.status)
+        # reset metric states, as metrics may be evaluated multiple times
+ self._reset_metrics()
+
+ def evaluate(self):
+ with paddle.no_grad():
+ self._eval_with_loader(self.loader)
+
+ def predict(self,
+ images,
+ draw_threshold=0.5,
+ output_dir='output',
+ save_txt=False):
+ self.dataset.set_images(images)
+ loader = create('TestReader')(self.dataset, 0)
+
+ imid2path = self.dataset.get_imid2path()
+
+ anno_file = self.dataset.get_anno()
+ clsid2catid, catid2name = get_categories(
+ self.cfg.metric, anno_file=anno_file)
+
+ # Run Infer
+ self.status['mode'] = 'test'
+ self.model.eval()
+ if self.cfg.get('print_flops', False):
+ self._flops(loader)
+ results = []
+ for step_id, data in enumerate(loader):
+ self.status['step_id'] = step_id
+ # forward
+ outs = self.model(data)
+
+ for key in ['im_shape', 'scale_factor', 'im_id']:
+ outs[key] = data[key]
+ for key, value in outs.items():
+ if hasattr(value, 'numpy'):
+ outs[key] = value.numpy()
+ results.append(outs)
+ # sniper
+        if isinstance(self.dataset, SniperCOCODataSet):
+ results = self.dataset.anno_cropper.aggregate_chips_detections(
+ results)
+
+ for outs in results:
+ batch_res = get_infer_results(outs, clsid2catid)
+ bbox_num = outs['bbox_num']
+
+ start = 0
+ for i, im_id in enumerate(outs['im_id']):
+ image_path = imid2path[int(im_id)]
+ image = Image.open(image_path).convert('RGB')
+ image = ImageOps.exif_transpose(image)
+ self.status['original_image'] = np.array(image.copy())
+
+ end = start + bbox_num[i]
+ bbox_res = batch_res['bbox'][start:end] \
+ if 'bbox' in batch_res else None
+ mask_res = batch_res['mask'][start:end] \
+ if 'mask' in batch_res else None
+ segm_res = batch_res['segm'][start:end] \
+ if 'segm' in batch_res else None
+ keypoint_res = batch_res['keypoint'][start:end] \
+ if 'keypoint' in batch_res else None
+ image = visualize_results(
+ image, bbox_res, mask_res, segm_res, keypoint_res,
+ int(im_id), catid2name, draw_threshold)
+ self.status['result_image'] = np.array(image.copy())
+ if self._compose_callback:
+ self._compose_callback.on_step_end(self.status)
+ # save image with detection
+ save_name = self._get_save_image_name(output_dir, image_path)
+ logger.info("Detection bbox results save in {}".format(
+ save_name))
+ image.save(save_name, quality=95)
+ if save_txt:
+ save_path = os.path.splitext(save_name)[0] + '.txt'
+ results = {}
+ results["im_id"] = im_id
+ if bbox_res:
+ results["bbox_res"] = bbox_res
+ if keypoint_res:
+ results["keypoint_res"] = keypoint_res
+ save_result(save_path, results, catid2name, draw_threshold)
+ start = end
+
+ def _get_save_image_name(self, output_dir, image_path):
+ """
+ Get save image name from source image path.
+ """
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ image_name = os.path.split(image_path)[-1]
+ name, ext = os.path.splitext(image_name)
+ return os.path.join(output_dir, "{}".format(name)) + ext
+
+ def _get_infer_cfg_and_input_spec(self, save_dir, prune_input=True):
+ image_shape = None
+ im_shape = [None, 2]
+ scale_factor = [None, 2]
+ if self.cfg.architecture in MOT_ARCH:
+ test_reader_name = 'TestMOTReader'
+ else:
+ test_reader_name = 'TestReader'
+ if 'inputs_def' in self.cfg[test_reader_name]:
+ inputs_def = self.cfg[test_reader_name]['inputs_def']
+ image_shape = inputs_def.get('image_shape', None)
+ # set image_shape=[None, 3, -1, -1] as default
+ if image_shape is None:
+ image_shape = [None, 3, -1, -1]
+
+ if len(image_shape) == 3:
+ image_shape = [None] + image_shape
+ else:
+ im_shape = [image_shape[0], 2]
+ scale_factor = [image_shape[0], 2]
+
+ if hasattr(self.model, 'deploy'):
+ self.model.deploy = True
+ if hasattr(self.model, 'fuse_norm'):
+ self.model.fuse_norm = self.cfg['TestReader'].get('fuse_normalize',
+ False)
+
+ # Save infer cfg
+ _dump_infer_config(self.cfg,
+ os.path.join(save_dir, 'infer_cfg.yml'), image_shape,
+ self.model)
+
+ input_spec = [{
+ "image": InputSpec(
+ shape=image_shape, name='image'),
+ "im_shape": InputSpec(
+ shape=im_shape, name='im_shape'),
+ "scale_factor": InputSpec(
+ shape=scale_factor, name='scale_factor')
+ }]
+ if self.cfg.architecture == 'DeepSORT':
+ input_spec[0].update({
+ "crops": InputSpec(
+ shape=[None, 3, 192, 64], name='crops')
+ })
+ if prune_input:
+ static_model = paddle.jit.to_static(
+ self.model, input_spec=input_spec)
+            # NOTE: dy2st does not prune the program, but jit.save prunes the
+            # input spec, so prune it here and save with the pruned input spec
+ pruned_input_spec = _prune_input_spec(
+ input_spec, static_model.forward.main_program,
+ static_model.forward.outputs)
+ else:
+ static_model = None
+ pruned_input_spec = input_spec
+
+        # TODO: hard-coded; remove once pruning input_spec is supported.
+ if self.cfg.architecture == 'PicoDet':
+ pruned_input_spec = [{
+ "image": InputSpec(
+ shape=image_shape, name='image')
+ }]
+
+ return static_model, pruned_input_spec
+
+ def export(self, output_dir='output_inference'):
+ self.model.eval()
+ model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]
+ save_dir = os.path.join(output_dir, model_name)
+ if not os.path.exists(save_dir):
+ os.makedirs(save_dir)
+
+ static_model, pruned_input_spec = self._get_infer_cfg_and_input_spec(
+ save_dir)
+
+ # dy2st and save model
+ if 'slim' not in self.cfg or self.cfg['slim_type'] != 'QAT':
+ paddle.jit.save(
+ static_model,
+ os.path.join(save_dir, 'model'),
+ input_spec=pruned_input_spec)
+ else:
+ self.cfg.slim.save_quantized_model(
+ self.model,
+ os.path.join(save_dir, 'model'),
+ input_spec=pruned_input_spec)
+ logger.info("Export model and saved in {}".format(save_dir))
+
+ def post_quant(self, output_dir='output_inference'):
+ model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]
+ save_dir = os.path.join(output_dir, model_name)
+ if not os.path.exists(save_dir):
+ os.makedirs(save_dir)
+
+ for idx, data in enumerate(self.loader):
+ self.model(data)
+ if idx == int(self.cfg.get('quant_batch_num', 10)):
+ break
+
+ # TODO: support prune input_spec
+ _, pruned_input_spec = self._get_infer_cfg_and_input_spec(
+ save_dir, prune_input=False)
+
+ self.cfg.slim.save_quantized_model(
+ self.model,
+ os.path.join(save_dir, 'model'),
+ input_spec=pruned_input_spec)
+ logger.info("Export Post-Quant model and saved in {}".format(save_dir))
+
+ def _flops(self, loader):
+ self.model.eval()
+ try:
+ import paddleslim
+        except ImportError:
+ logger.warning(
+ 'Unable to calculate flops, please install paddleslim, for example: `pip install paddleslim`'
+ )
+ return
+
+ from paddleslim.analysis import dygraph_flops as flops
+ input_data = None
+ for data in loader:
+ input_data = data
+ break
+
+ input_spec = [{
+ "image": input_data['image'][0].unsqueeze(0),
+ "im_shape": input_data['im_shape'][0].unsqueeze(0),
+ "scale_factor": input_data['scale_factor'][0].unsqueeze(0)
+ }]
+        total_flops = flops(self.model, input_spec) / (1000**3)
+        logger.info(" Model FLOPs : {:.6f}G. (image shape is {})".format(
+            total_flops, input_data['image'][0].unsqueeze(0).shape))
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/README.md b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/README.md
new file mode 100644
index 000000000..7ada0acf7
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/README.md
@@ -0,0 +1,38 @@
+# Compiling the custom OP
+The rotated-box IoU OP is implemented following the guide on [custom external operators](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/07_new_op/new_custom_op.html).
+
+## 1. Requirements
+- Paddle >= 2.0.1
+- gcc 8.2
+
+## 2. Installation
+```
+python3.7 setup.py install
+```
+
+Use it as follows:
+```
+import numpy as np
+import paddle
+
+# import the custom op
+from rbox_iou_ops import rbox_iou
+
+paddle.set_device('gpu:0')
+paddle.disable_static()
+
+rbox1 = np.random.rand(13000, 5)
+rbox2 = np.random.rand(7, 5)
+
+pd_rbox1 = paddle.to_tensor(rbox1)
+pd_rbox2 = paddle.to_tensor(rbox2)
+
+iou = rbox_iou(pd_rbox1, pd_rbox2)
+print('iou', iou)
+```
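+
+For the shapes above, `iou` is a dense pairwise matrix with one IoU value per (rbox1, rbox2) pair:
+```
+print(iou.shape)  # [13000, 7]
+```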
+
+## 3. Unit test
+The unit test in `test.py` verifies the custom OP by comparing its output against a pure-Python implementation.
+
+Because the Python and C++ implementations differ slightly in numerical detail, the error tolerance is set to 0.02.
+```
+python3.7 test.py
+```
+If the message `rbox_iou OP compute right!` is printed, the OP passed the test.
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/rbox_iou_op.cc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/rbox_iou_op.cc
new file mode 100644
index 000000000..6031953d2
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/rbox_iou_op.cc
@@ -0,0 +1,97 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated
+
+#include "rbox_iou_op.h"
+#include "paddle/extension.h"
+
+
+template <typename T>
+void rbox_iou_cpu_kernel(
+ const int rbox1_num,
+ const int rbox2_num,
+ const T* rbox1_data_ptr,
+ const T* rbox2_data_ptr,
+ T* output_data_ptr) {
+
+ int i, j;
+ for (i = 0; i < rbox1_num; i++) {
+ for (j = 0; j < rbox2_num; j++) {
+ int offset = i * rbox2_num + j;
+ output_data_ptr[offset] = rbox_iou_single(rbox1_data_ptr + i * 5, rbox2_data_ptr + j * 5);
+ }
+ }
+}
+
+
+#define CHECK_INPUT_CPU(x) PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+
+std::vector<paddle::Tensor> RboxIouCPUForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2) {
+ CHECK_INPUT_CPU(rbox1);
+ CHECK_INPUT_CPU(rbox2);
+
+ auto rbox1_num = rbox1.shape()[0];
+ auto rbox2_num = rbox2.shape()[0];
+
+ auto output = paddle::Tensor(paddle::PlaceType::kCPU, {rbox1_num, rbox2_num});
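+    // output holds the dense pairwise IoU matrix, one entry per (rbox1, rbox2) pair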
+
+ PD_DISPATCH_FLOATING_TYPES(
+ rbox1.type(),
+ "rbox_iou_cpu_kernel",
+ ([&] {
+            rbox_iou_cpu_kernel<data_t>(
+                rbox1_num,
+                rbox2_num,
+                rbox1.data<data_t>(),
+                rbox2.data<data_t>(),
+                output.mutable_data<data_t>());
+ }));
+
+ return {output};
+}
+
+
+#ifdef PADDLE_WITH_CUDA
+std::vector<paddle::Tensor> RboxIouCUDAForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2);
+#endif
+
+
+#define CHECK_INPUT_SAME(x1, x2) PD_CHECK(x1.place() == x2.place(), "input must be same place.")
+
+std::vector<paddle::Tensor> RboxIouForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2) {
+ CHECK_INPUT_SAME(rbox1, rbox2);
+ if (rbox1.place() == paddle::PlaceType::kCPU) {
+ return RboxIouCPUForward(rbox1, rbox2);
+#ifdef PADDLE_WITH_CUDA
+ } else if (rbox1.place() == paddle::PlaceType::kGPU) {
+ return RboxIouCUDAForward(rbox1, rbox2);
+#endif
+ }
+}
+
+std::vector<std::vector<int64_t>> InferShape(std::vector<int64_t> rbox1_shape, std::vector<int64_t> rbox2_shape) {
+ return {{rbox1_shape[0], rbox2_shape[0]}};
+}
+
+std::vector<paddle::DataType> InferDtype(paddle::DataType t1, paddle::DataType t2) {
+ return {t1};
+}
+
+PD_BUILD_OP(rbox_iou)
+ .Inputs({"RBOX1", "RBOX2"})
+ .Outputs({"Output"})
+ .SetKernelFn(PD_KERNEL(RboxIouForward))
+ .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
+ .SetInferDtypeFn(PD_INFER_DTYPE(InferDtype));
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/rbox_iou_op.cu b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/rbox_iou_op.cu
new file mode 100644
index 000000000..8ec43e54b
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/rbox_iou_op.cu
@@ -0,0 +1,120 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated
+
+#include "rbox_iou_op.h"
+#include "paddle/extension.h"
+
+// 2D block with 32 * 16 = 512 threads per block
+const int BLOCK_DIM_X = 32;
+const int BLOCK_DIM_Y = 16;
+
+/**
+ Computes ceil(a / b)
+*/
+
+static inline int CeilDiv(const int a, const int b) {
+ return (a + b - 1) / b;
+}
+
+template <typename T>
+__global__ void rbox_iou_cuda_kernel(
+ const int rbox1_num,
+ const int rbox2_num,
+ const T* rbox1_data_ptr,
+ const T* rbox2_data_ptr,
+ T* output_data_ptr) {
+
+ // get row_start and col_start
+ const int rbox1_block_idx = blockIdx.x * blockDim.x;
+ const int rbox2_block_idx = blockIdx.y * blockDim.y;
+
+ const int rbox1_thread_num = min(rbox1_num - rbox1_block_idx, blockDim.x);
+ const int rbox2_thread_num = min(rbox2_num - rbox2_block_idx, blockDim.y);
+
+ __shared__ T block_boxes1[BLOCK_DIM_X * 5];
+ __shared__ T block_boxes2[BLOCK_DIM_Y * 5];
+
+
+ // It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y
+ if (threadIdx.x < rbox1_thread_num && threadIdx.y == 0) {
+ block_boxes1[threadIdx.x * 5 + 0] =
+ rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 0];
+ block_boxes1[threadIdx.x * 5 + 1] =
+ rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 1];
+ block_boxes1[threadIdx.x * 5 + 2] =
+ rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 2];
+ block_boxes1[threadIdx.x * 5 + 3] =
+ rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 3];
+ block_boxes1[threadIdx.x * 5 + 4] =
+ rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 4];
+ }
+
+    // rbox2_thread_num <= BLOCK_DIM_Y <= BLOCK_DIM_X, so threadIdx.x can also index the rbox2 tile; reuse the threadIdx.y == 0 condition from above
+ if (threadIdx.x < rbox2_thread_num && threadIdx.y == 0) {
+ block_boxes2[threadIdx.x * 5 + 0] =
+ rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 0];
+ block_boxes2[threadIdx.x * 5 + 1] =
+ rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 1];
+ block_boxes2[threadIdx.x * 5 + 2] =
+ rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 2];
+ block_boxes2[threadIdx.x * 5 + 3] =
+ rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 3];
+ block_boxes2[threadIdx.x * 5 + 4] =
+ rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 4];
+ }
+
+ // sync
+ __syncthreads();
+
+ if (threadIdx.x < rbox1_thread_num && threadIdx.y < rbox2_thread_num) {
+ int offset = (rbox1_block_idx + threadIdx.x) * rbox2_num + rbox2_block_idx + threadIdx.y;
+ output_data_ptr[offset] = rbox_iou_single(block_boxes1 + threadIdx.x * 5, block_boxes2 + threadIdx.y * 5);
+ }
+}
+
+#define CHECK_INPUT_GPU(x) PD_CHECK(x.place() == paddle::PlaceType::kGPU, #x " must be a GPU Tensor.")
+
+std::vector RboxIouCUDAForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2) {
+ CHECK_INPUT_GPU(rbox1);
+ CHECK_INPUT_GPU(rbox2);
+
+ auto rbox1_num = rbox1.shape()[0];
+ auto rbox2_num = rbox2.shape()[0];
+
+ auto output = paddle::Tensor(paddle::PlaceType::kGPU, {rbox1_num, rbox2_num});
+
+ const int blocks_x = CeilDiv(rbox1_num, BLOCK_DIM_X);
+ const int blocks_y = CeilDiv(rbox2_num, BLOCK_DIM_Y);
+
+ dim3 blocks(blocks_x, blocks_y);
+ dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
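+    // the grid of 2D blocks tiles the full rbox1 x rbox2 pair matrix; each block handles a BLOCK_DIM_X x BLOCK_DIM_Y tile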
+
+ PD_DISPATCH_FLOATING_TYPES(
+ rbox1.type(),
+ "rbox_iou_cuda_kernel",
+ ([&] {
+            rbox_iou_cuda_kernel<data_t><<<blocks, threads>>>(
+                rbox1_num,
+                rbox2_num,
+                rbox1.data<data_t>(),
+                rbox2.data<data_t>(),
+                output.mutable_data<data_t>());
+ }));
+
+ return {output};
+}
+
+
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/rbox_iou_op.h b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/rbox_iou_op.h
new file mode 100644
index 000000000..77fb62e39
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/rbox_iou_op.h
@@ -0,0 +1,356 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated
+
+#pragma once
+
+#include <cassert>
+#include <cmath>
+#include <vector>
+
+#ifdef __CUDACC__
+// Designates functions callable from the host (CPU) and the device (GPU)
+#define HOST_DEVICE __host__ __device__
+#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__
+#else
+#include <algorithm>
+#define HOST_DEVICE
+#define HOST_DEVICE_INLINE HOST_DEVICE inline
+#endif
+
+namespace {
+
+template <typename T>
+struct RotatedBox {
+ T x_ctr, y_ctr, w, h, a;
+};
+
+template <typename T>
+struct Point {
+ T x, y;
+ HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {}
+ HOST_DEVICE_INLINE Point operator+(const Point& p) const {
+ return Point(x + p.x, y + p.y);
+ }
+ HOST_DEVICE_INLINE Point& operator+=(const Point& p) {
+ x += p.x;
+ y += p.y;
+ return *this;
+ }
+ HOST_DEVICE_INLINE Point operator-(const Point& p) const {
+ return Point(x - p.x, y - p.y);
+ }
+ HOST_DEVICE_INLINE Point operator*(const T coeff) const {
+ return Point(x * coeff, y * coeff);
+ }
+};
+
+template <typename T>
+HOST_DEVICE_INLINE T dot_2d(const Point<T>& A, const Point<T>& B) {
+ return A.x * B.x + A.y * B.y;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE T cross_2d(const Point<T>& A, const Point<T>& B) {
+ return A.x * B.y - B.x * A.y;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE void get_rotated_vertices(
+    const RotatedBox<T>& box,
+    Point<T> (&pts)[4]) {
+ // M_PI / 180. == 0.01745329251
+ //double theta = box.a * 0.01745329251;
+ //MODIFIED
+ double theta = box.a;
+ T cosTheta2 = (T)cos(theta) * 0.5f;
+ T sinTheta2 = (T)sin(theta) * 0.5f;
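+  // each vertex is center ± rotated half-extents; the 0.5 factor above folds the halving into cos/sin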
+
+ // y: top --> down; x: left --> right
+ pts[0].x = box.x_ctr - sinTheta2 * box.h - cosTheta2 * box.w;
+ pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w;
+ pts[1].x = box.x_ctr + sinTheta2 * box.h - cosTheta2 * box.w;
+ pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w;
+ pts[2].x = 2 * box.x_ctr - pts[0].x;
+ pts[2].y = 2 * box.y_ctr - pts[0].y;
+ pts[3].x = 2 * box.x_ctr - pts[1].x;
+ pts[3].y = 2 * box.y_ctr - pts[1].y;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE int get_intersection_points(
+    const Point<T> (&pts1)[4],
+    const Point<T> (&pts2)[4],
+    Point<T> (&intersections)[24]) {
+ // Line vector
+ // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1]
+  Point<T> vec1[4], vec2[4];
+ for (int i = 0; i < 4; i++) {
+ vec1[i] = pts1[(i + 1) % 4] - pts1[i];
+ vec2[i] = pts2[(i + 1) % 4] - pts2[i];
+ }
+
+ // Line test - test all line combos for intersection
+ int num = 0; // number of intersections
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ // Solve for 2x2 Ax=b
+ T det = cross_2d(vec2[j], vec1[i]);
+
+ // This takes care of parallel lines
+ if (fabs(det) <= 1e-14) {
+ continue;
+ }
+
+ auto vec12 = pts2[j] - pts1[i];
+
+ T t1 = cross_2d(vec2[j], vec12) / det;
+ T t2 = cross_2d(vec1[i], vec12) / det;
+
+ if (t1 >= 0.0f && t1 <= 1.0f && t2 >= 0.0f && t2 <= 1.0f) {
+ intersections[num++] = pts1[i] + vec1[i] * t1;
+ }
+ }
+ }
+
+ // Check for vertices of rect1 inside rect2
+ {
+ const auto& AB = vec2[0];
+ const auto& DA = vec2[3];
+ auto ABdotAB = dot_2d(AB, AB);
+ auto ADdotAD = dot_2d(DA, DA);
+ for (int i = 0; i < 4; i++) {
+ // assume ABCD is the rectangle, and P is the point to be judged
+ // P is inside ABCD iff. P's projection on AB lies within AB
+ // and P's projection on AD lies within AD
+
+ auto AP = pts1[i] - pts2[0];
+
+ auto APdotAB = dot_2d(AP, AB);
+ auto APdotAD = -dot_2d(AP, DA);
+
+ if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) &&
+ (APdotAD <= ADdotAD)) {
+ intersections[num++] = pts1[i];
+ }
+ }
+ }
+
+ // Reverse the check - check for vertices of rect2 inside rect1
+ {
+ const auto& AB = vec1[0];
+ const auto& DA = vec1[3];
+ auto ABdotAB = dot_2d(AB, AB);
+ auto ADdotAD = dot_2d(DA, DA);
+ for (int i = 0; i < 4; i++) {
+ auto AP = pts2[i] - pts1[0];
+
+ auto APdotAB = dot_2d(AP, AB);
+ auto APdotAD = -dot_2d(AP, DA);
+
+ if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) &&
+ (APdotAD <= ADdotAD)) {
+ intersections[num++] = pts2[i];
+ }
+ }
+ }
+
+ return num;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE int convex_hull_graham(
+    const Point<T> (&p)[24],
+    const int& num_in,
+    Point<T> (&q)[24],
+    bool shift_to_zero = false) {
+ assert(num_in >= 2);
+
+ // Step 1:
+ // Find point with minimum y
+ // if more than 1 points have the same minimum y,
+ // pick the one with the minimum x.
+ int t = 0;
+ for (int i = 1; i < num_in; i++) {
+ if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) {
+ t = i;
+ }
+ }
+ auto& start = p[t]; // starting point
+
+ // Step 2:
+ // Subtract starting point from every points (for sorting in the next step)
+ for (int i = 0; i < num_in; i++) {
+ q[i] = p[i] - start;
+ }
+
+ // Swap the starting point to position 0
+ auto tmp = q[0];
+ q[0] = q[t];
+ q[t] = tmp;
+
+ // Step 3:
+ // Sort point 1 ~ num_in according to their relative cross-product values
+ // (essentially sorting according to angles)
+ // If the angles are the same, sort according to their distance to origin
+ T dist[24];
+ for (int i = 0; i < num_in; i++) {
+ dist[i] = dot_2d(q[i], q[i]);
+ }
+
+#ifdef __CUDACC__
+ // CUDA version
+ // In the future, we can potentially use thrust
+ // for sorting here to improve speed (though not guaranteed)
+ for (int i = 1; i < num_in - 1; i++) {
+ for (int j = i + 1; j < num_in; j++) {
+ T crossProduct = cross_2d(q[i], q[j]);
+ if ((crossProduct < -1e-6) ||
+ (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) {
+ auto q_tmp = q[i];
+ q[i] = q[j];
+ q[j] = q_tmp;
+ auto dist_tmp = dist[i];
+ dist[i] = dist[j];
+ dist[j] = dist_tmp;
+ }
+ }
+ }
+#else
+ // CPU version
+ std::sort(
+      q + 1, q + num_in, [](const Point<T>& A, const Point<T>& B) -> bool {
+ T temp = cross_2d(A, B);
+ if (fabs(temp) < 1e-6) {
+ return dot_2d(A, A) < dot_2d(B, B);
+ } else {
+ return temp > 0;
+ }
+ });
+#endif
+
+ // Step 4:
+ // Make sure there are at least 2 points (that don't overlap with each other)
+ // in the stack
+ int k; // index of the non-overlapped second point
+ for (k = 1; k < num_in; k++) {
+ if (dist[k] > 1e-8) {
+ break;
+ }
+ }
+ if (k == num_in) {
+ // We reach the end, which means the convex hull is just one point
+ q[0] = p[t];
+ return 1;
+ }
+ q[1] = q[k];
+ int m = 2; // 2 points in the stack
+ // Step 5:
+ // Finally we can start the scanning process.
+ // When a non-convex relationship between the 3 points is found
+ // (either concave shape or duplicated points),
+ // we pop the previous point from the stack
+ // until the 3-point relationship is convex again, or
+ // until the stack only contains two points
+ for (int i = k + 1; i < num_in; i++) {
+ while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2]) >= 0) {
+ m--;
+ }
+ q[m++] = q[i];
+ }
+
+ // Step 6 (Optional):
+ // In general sense we need the original coordinates, so we
+ // need to shift the points back (reverting Step 2)
+ // But if we're only interested in getting the area/perimeter of the shape
+ // We can simply return.
+ if (!shift_to_zero) {
+ for (int i = 0; i < m; i++) {
+ q[i] += start;
+ }
+ }
+
+ return m;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE T polygon_area(const Point<T> (&q)[24], const int& m) {
+ if (m <= 2) {
+ return 0;
+ }
+
+ T area = 0;
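+  // fan triangulation from q[0]: each |cross| is twice one triangle's area (shoelace formula)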
+ for (int i = 1; i < m - 1; i++) {
+ area += fabs(cross_2d(q[i] - q[0], q[i + 1] - q[0]));
+ }
+
+ return area / 2.0;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE T rboxes_intersection(
+    const RotatedBox<T>& box1,
+    const RotatedBox<T>& box2) {
+ // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned
+ // from rotated_rect_intersection_pts
+  Point<T> intersectPts[24], orderedPts[24];
+
+  Point<T> pts1[4];
+  Point<T> pts2[4];
+  get_rotated_vertices<T>(box1, pts1);
+  get_rotated_vertices<T>(box2, pts2);
+
+  int num = get_intersection_points<T>(pts1, pts2, intersectPts);
+
+ if (num <= 2) {
+ return 0.0;
+ }
+
+ // Convex Hull to order the intersection points in clockwise order and find
+ // the contour area.
+  int num_convex = convex_hull_graham<T>(intersectPts, num, orderedPts, true);
+  return polygon_area<T>(orderedPts, num_convex);
+}
+
+} // namespace
+
+template <typename T>
+HOST_DEVICE_INLINE T
+rbox_iou_single(T const* const box1_raw, T const* const box2_raw) {
+ // shift center to the middle point to achieve higher precision in result
+  RotatedBox<T> box1, box2;
+ auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0;
+ auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0;
+ box1.x_ctr = box1_raw[0] - center_shift_x;
+ box1.y_ctr = box1_raw[1] - center_shift_y;
+ box1.w = box1_raw[2];
+ box1.h = box1_raw[3];
+ box1.a = box1_raw[4];
+ box2.x_ctr = box2_raw[0] - center_shift_x;
+ box2.y_ctr = box2_raw[1] - center_shift_y;
+ box2.w = box2_raw[2];
+ box2.h = box2_raw[3];
+ box2.a = box2_raw[4];
+
+ const T area1 = box1.w * box1.h;
+ const T area2 = box2.w * box2.h;
+ if (area1 < 1e-14 || area2 < 1e-14) {
+ return 0.f;
+ }
+
+  const T intersection = rboxes_intersection<T>(box1, box2);
+ const T iou = intersection / (area1 + area2 - intersection);
+ return iou;
+}
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/setup.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/setup.py
new file mode 100644
index 000000000..d364db7ed
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/setup.py
@@ -0,0 +1,14 @@
+import paddle
+from paddle.utils.cpp_extension import CppExtension, CUDAExtension, setup
+
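+# Build the rbox_iou custom op: CUDAExtension when the installed Paddle was
+# compiled with CUDA support, otherwise a CPU-only CppExtension.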
+if __name__ == "__main__":
+ if paddle.device.is_compiled_with_cuda():
+ setup(
+ name='rbox_iou_ops',
+ ext_modules=CUDAExtension(
+ sources=['rbox_iou_op.cc', 'rbox_iou_op.cu'],
+ extra_compile_args={'cxx': ['-DPADDLE_WITH_CUDA']}))
+ else:
+ setup(
+ name='rbox_iou_ops',
+ ext_modules=CppExtension(sources=['rbox_iou_op.cc']))
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/test.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/test.py
new file mode 100644
index 000000000..85872e484
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/ext_op/test.py
@@ -0,0 +1,156 @@
+import numpy as np
+import sys
+import time
+from shapely.geometry import Polygon
+import paddle
+import unittest
+
+try:
+ from rbox_iou_ops import rbox_iou
+except Exception as e:
+    print('failed to import rbox_iou_ops, build it first via `python3.7 setup.py install`:', e)
+ sys.exit(-1)
+
+
+def rbox2poly_single(rrect, get_best_begin_point=False):
+ """
+ rrect:[x_ctr,y_ctr,w,h,angle]
+ to
+ poly:[x0,y0,x1,y1,x2,y2,x3,y3]
+ """
+ x_ctr, y_ctr, width, height, angle = rrect[:5]
+ tl_x, tl_y, br_x, br_y = -width / 2, -height / 2, width / 2, height / 2
+ # rect 2x4
+ rect = np.array([[tl_x, br_x, br_x, tl_x], [tl_y, tl_y, br_y, br_y]])
+ R = np.array([[np.cos(angle), -np.sin(angle)],
+ [np.sin(angle), np.cos(angle)]])
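+    # rotate the axis-aligned corners by 'angle'; the center offset is added below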
+ # poly
+ poly = R.dot(rect)
+ x0, x1, x2, x3 = poly[0, :4] + x_ctr
+ y0, y1, y2, y3 = poly[1, :4] + y_ctr
+ poly = np.array([x0, y0, x1, y1, x2, y2, x3, y3], dtype=np.float64)
+ return poly
+
+
+def intersection(g, p):
+ """
+ Intersection.
+ """
+
+ g = g[:8].reshape((4, 2))
+ p = p[:8].reshape((4, 2))
+
+ a = g
+ b = p
+
+ use_filter = True
+ if use_filter:
+        # quick reject: if the axis-aligned bounding rectangles do not overlap, the IoU is 0
+ inter_x1 = np.maximum(np.min(a[:, 0]), np.min(b[:, 0]))
+ inter_x2 = np.minimum(np.max(a[:, 0]), np.max(b[:, 0]))
+ inter_y1 = np.maximum(np.min(a[:, 1]), np.min(b[:, 1]))
+ inter_y2 = np.minimum(np.max(a[:, 1]), np.max(b[:, 1]))
+ if inter_x1 >= inter_x2 or inter_y1 >= inter_y2:
+ return 0.
+ x1 = np.minimum(np.min(a[:, 0]), np.min(b[:, 0]))
+ x2 = np.maximum(np.max(a[:, 0]), np.max(b[:, 0]))
+ y1 = np.minimum(np.min(a[:, 1]), np.min(b[:, 1]))
+ y2 = np.maximum(np.max(a[:, 1]), np.max(b[:, 1]))
+ if x1 >= x2 or y1 >= y2 or (x2 - x1) < 2 or (y2 - y1) < 2:
+ return 0.
+
+ g = Polygon(g)
+ p = Polygon(p)
+ if not g.is_valid or not p.is_valid:
+ return 0
+
+ inter = Polygon(g).intersection(Polygon(p)).area
+ union = g.area + p.area - inter
+ if union == 0:
+ return 0
+ else:
+ return inter / union
+
+
+def rbox_overlaps(anchors, gt_bboxes, use_cv2=False):
+    """
+    Compute pairwise IoU between rotated boxes.
+
+    Args:
+        anchors: [NA, 5] x_ctr, y_ctr, w, h, angle
+        gt_bboxes: [M, 5] x_ctr, y_ctr, w, h, angle
+
+    Returns:
+        iou: [NA, M] pairwise IoU matrix
+    """
+ assert anchors.shape[1] == 5
+ assert gt_bboxes.shape[1] == 5
+
+    gt_bboxes_poly = [rbox2poly_single(e) for e in gt_bboxes]
+    anchors_poly = [rbox2poly_single(e) for e in anchors]
+
+    num_gt, num_anchors = len(gt_bboxes_poly), len(anchors_poly)
+    iou = np.zeros((num_gt, num_anchors), dtype=np.float64)
+
+    start_time = time.time()
+    for i in range(num_gt):
+        for j in range(num_anchors):
+            try:
+                iou[i, j] = intersection(gt_bboxes_poly[i], anchors_poly[j])
+            except Exception as e:
+                print('cur gt_bboxes_poly[i]', gt_bboxes_poly[i],
+                      'anchors_poly[j]', anchors_poly[j], e)
+    iou = iou.T
+    return iou
+
+
+def gen_sample(n):
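+    # random rboxes: x, y, w, h in [0.001, 0.451), angle in [-0.5, 0.5) radians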
+ rbox = np.random.rand(n, 5)
+ rbox[:, 0:4] = rbox[:, 0:4] * 0.45 + 0.001
+ rbox[:, 4] = rbox[:, 4] - 0.5
+ return rbox
+
+
+class RBoxIoUTest(unittest.TestCase):
+ def setUp(self):
+ self.initTestCase()
+ self.rbox1 = gen_sample(self.n)
+ self.rbox2 = gen_sample(self.m)
+
+ def initTestCase(self):
+ self.n = 13000
+ self.m = 7
+
+ def assertAllClose(self, x, y, msg, atol=5e-1, rtol=1e-2):
+ self.assertTrue(np.allclose(x, y, atol=atol, rtol=rtol), msg=msg)
+
+ def get_places(self):
+ places = [paddle.CPUPlace()]
+ if paddle.device.is_compiled_with_cuda():
+ places.append(paddle.CUDAPlace(0))
+
+ return places
+
+ def check_output(self, place):
+ paddle.disable_static()
+ pd_rbox1 = paddle.to_tensor(self.rbox1, place=place)
+ pd_rbox2 = paddle.to_tensor(self.rbox2, place=place)
+ actual_t = rbox_iou(pd_rbox1, pd_rbox2).numpy()
+ poly_rbox1 = self.rbox1
+ poly_rbox2 = self.rbox2
+ poly_rbox1[:, 0:4] = self.rbox1[:, 0:4] * 1024
+ poly_rbox2[:, 0:4] = self.rbox2[:, 0:4] * 1024
+ expect_t = rbox_overlaps(poly_rbox1, poly_rbox2, use_cv2=False)
+ self.assertAllClose(
+ actual_t,
+ expect_t,
+ msg="rbox_iou has diff at {} \nExpect {}\nBut got {}".format(
+ str(place), str(expect_t), str(actual_t)))
+
+ def test_output(self):
+ places = self.get_places()
+ for place in places:
+ self.check_output(place)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__init__.py
new file mode 100644
index 000000000..d69e8af0f
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__init__.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import metrics
+from . import keypoint_metrics
+
+from .metrics import *
+from .keypoint_metrics import *
+
+__all__ = metrics.__all__ + keypoint_metrics.__all__
+
+from . import mot_metrics
+from .mot_metrics import *
+__all__ = __all__ + mot_metrics.__all__
+
+from . import mcmot_metrics
+from .mcmot_metrics import *
+__all__ = __all__ + mcmot_metrics.__all__
\ No newline at end of file
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..08ea6f44d
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/coco_utils.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/coco_utils.cpython-37.pyc
new file mode 100644
index 000000000..c55b3d121
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/coco_utils.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/json_results.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/json_results.cpython-37.pyc
new file mode 100644
index 000000000..d99601c30
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/json_results.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/keypoint_metrics.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/keypoint_metrics.cpython-37.pyc
new file mode 100644
index 000000000..3ea557dda
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/keypoint_metrics.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/map_utils.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/map_utils.cpython-37.pyc
new file mode 100644
index 000000000..c2a9ed2f0
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/map_utils.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/mcmot_metrics.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/mcmot_metrics.cpython-37.pyc
new file mode 100644
index 000000000..1390aa87b
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/mcmot_metrics.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/metrics.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/metrics.cpython-37.pyc
new file mode 100644
index 000000000..0a4895277
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/metrics.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/mot_metrics.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/mot_metrics.cpython-37.pyc
new file mode 100644
index 000000000..683014969
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/mot_metrics.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/munkres.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/munkres.cpython-37.pyc
new file mode 100644
index 000000000..c00df5d8f
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/munkres.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/widerface_utils.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/widerface_utils.cpython-37.pyc
new file mode 100644
index 000000000..bd9959299
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/__pycache__/widerface_utils.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/coco_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/coco_utils.py
new file mode 100644
index 000000000..47b92bc62
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/coco_utils.py
@@ -0,0 +1,184 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+import numpy as np
+import itertools
+
+from ppdet.metrics.json_results import get_det_res, get_det_poly_res, get_seg_res, get_solov2_segm_res, get_keypoint_res
+from ppdet.metrics.map_utils import draw_pr_curve
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+
+def get_infer_results(outs, catid, bias=0):
+ """
+ Get result at the stage of inference.
+ The output format is dictionary containing bbox or mask result.
+
+ For example, bbox result is a list and each element contains
+ image_id, category_id, bbox and score.
+ """
+ if outs is None or len(outs) == 0:
+        raise ValueError(
+            'The number of valid detection results is zero. Please check the model and the input data.'
+        )
+
+ im_id = outs['im_id']
+
+ infer_res = {}
+ if 'bbox' in outs:
+ if len(outs['bbox']) > 0 and len(outs['bbox'][0]) > 6:
+ infer_res['bbox'] = get_det_poly_res(
+ outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)
+ else:
+ infer_res['bbox'] = get_det_res(
+ outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)
+
+ if 'mask' in outs:
+ # mask post process
+ infer_res['mask'] = get_seg_res(outs['mask'], outs['bbox'],
+ outs['bbox_num'], im_id, catid)
+
+ if 'segm' in outs:
+ infer_res['segm'] = get_solov2_segm_res(outs, im_id, catid)
+
+ if 'keypoint' in outs:
+ infer_res['keypoint'] = get_keypoint_res(outs, im_id)
+ outs['bbox_num'] = [len(infer_res['keypoint'])]
+
+ return infer_res
+
+
+def cocoapi_eval(jsonfile,
+ style,
+ coco_gt=None,
+ anno_file=None,
+ max_dets=(100, 300, 1000),
+ classwise=False,
+ sigmas=None,
+ use_area=True):
+ """
+ Args:
+ jsonfile (str): Evaluation json file, eg: bbox.json, mask.json.
+        style (str): COCOeval style, can be `bbox`, `segm`, `proposal`, `keypoints` or `keypoints_crowd`.
+        coco_gt (COCO): A loaded COCO ground-truth object, e.g.
+            coco_gt = COCO(anno_file); built from anno_file when None.
+ anno_file (str): COCO annotations file.
+ max_dets (tuple): COCO evaluation maxDets.
+ classwise (bool): Whether per-category AP and draw P-R Curve or not.
+ sigmas (nparray): keypoint labelling sigmas.
+ use_area (bool): If gt annotations (eg. CrowdPose, AIC)
+ do not have 'area', please set use_area=False.
+ """
+    assert coco_gt is not None or anno_file is not None
+ if style == 'keypoints_crowd':
+        # requires xtcocotools==1.6
+ from xtcocotools.coco import COCO
+ from xtcocotools.cocoeval import COCOeval
+ else:
+ from pycocotools.coco import COCO
+ from pycocotools.cocoeval import COCOeval
+
+    if coco_gt is None:
+ coco_gt = COCO(anno_file)
+    logger.info("Starting evaluation...")
+ coco_dt = coco_gt.loadRes(jsonfile)
+ if style == 'proposal':
+ coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
+ coco_eval.params.useCats = 0
+ coco_eval.params.maxDets = list(max_dets)
+ elif style == 'keypoints_crowd':
+ coco_eval = COCOeval(coco_gt, coco_dt, style, sigmas, use_area)
+ else:
+ coco_eval = COCOeval(coco_gt, coco_dt, style)
+ coco_eval.evaluate()
+ coco_eval.accumulate()
+ coco_eval.summarize()
+ if classwise:
+ # Compute per-category AP and PR curve
+ try:
+ from terminaltables import AsciiTable
+ except Exception as e:
+            logger.error(
+                'terminaltables not found, please install terminaltables, '
+                'for example: `pip install terminaltables`.')
+ raise e
+ precisions = coco_eval.eval['precision']
+ cat_ids = coco_gt.getCatIds()
+ # precision: (iou, recall, cls, area range, max dets)
+ assert len(cat_ids) == precisions.shape[2]
+ results_per_category = []
+ for idx, catId in enumerate(cat_ids):
+ # area range index 0: all area ranges
+ # max dets index -1: typically 100 per image
+ nm = coco_gt.loadCats(catId)[0]
+ precision = precisions[:, :, idx, 0, -1]
+ precision = precision[precision > -1]
+ if precision.size:
+ ap = np.mean(precision)
+ else:
+ ap = float('nan')
+ results_per_category.append(
+ (str(nm["name"]), '{:0.3f}'.format(float(ap))))
+ pr_array = precisions[0, :, idx, 0, 2]
+ recall_array = np.arange(0.0, 1.01, 0.01)
+ draw_pr_curve(
+ pr_array,
+ recall_array,
+ out_dir=style + '_pr_curve',
+ file_name='{}_precision_recall_curve.jpg'.format(nm["name"]))
+
+ num_columns = min(6, len(results_per_category) * 2)
+ results_flatten = list(itertools.chain(*results_per_category))
+ headers = ['category', 'AP'] * (num_columns // 2)
+ results_2d = itertools.zip_longest(
+ *[results_flatten[i::num_columns] for i in range(num_columns)])
+ table_data = [headers]
+ table_data += [result for result in results_2d]
+ table = AsciiTable(table_data)
+        logger.info('Per-category {} AP: \n{}'.format(style, table.table))
+        logger.info("Per-category PR curves have been saved to the {} folder.".
+                    format(style + '_pr_curve'))
+ # flush coco evaluation result
+ sys.stdout.flush()
+ return coco_eval.stats
+
+
+def json_eval_results(metric, json_directory, dataset):
+    """
+    cocoapi evaluation using existing proposal.json, bbox.json or mask.json files
+    """
+ assert metric == 'COCO'
+ anno_file = dataset.get_anno()
+ json_file_list = ['proposal.json', 'bbox.json', 'mask.json']
+ if json_directory:
+ assert os.path.exists(
+ json_directory), "The json directory:{} does not exist".format(
+ json_directory)
+ for k, v in enumerate(json_file_list):
+ json_file_list[k] = os.path.join(str(json_directory), v)
+
+ coco_eval_style = ['proposal', 'bbox', 'segm']
+ for i, v_json in enumerate(json_file_list):
+ if os.path.exists(v_json):
+ cocoapi_eval(v_json, coco_eval_style[i], anno_file=anno_file)
+ else:
+            logger.info("{} does not exist!".format(v_json))
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/json_results.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/json_results.py
new file mode 100644
index 000000000..c703de63b
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/json_results.py
@@ -0,0 +1,149 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import six
+import numpy as np
+
+
+def get_det_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
+ det_res = []
+ k = 0
+ for i in range(len(bbox_nums)):
+ cur_image_id = int(image_id[i][0])
+ det_nums = bbox_nums[i]
+ for j in range(det_nums):
+ dt = bboxes[k]
+ k = k + 1
+ num_id, score, xmin, ymin, xmax, ymax = dt.tolist()
+ if int(num_id) < 0:
+ continue
+ category_id = label_to_cat_id_map[int(num_id)]
+ w = xmax - xmin + bias
+ h = ymax - ymin + bias
+ bbox = [xmin, ymin, w, h]
+ dt_res = {
+ 'image_id': cur_image_id,
+ 'category_id': category_id,
+ 'bbox': bbox,
+ 'score': score
+ }
+ det_res.append(dt_res)
+ return det_res
+
+
+def get_det_poly_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
+ det_res = []
+ k = 0
+ for i in range(len(bbox_nums)):
+ cur_image_id = int(image_id[i][0])
+ det_nums = bbox_nums[i]
+ for j in range(det_nums):
+ dt = bboxes[k]
+ k = k + 1
+ num_id, score, x1, y1, x2, y2, x3, y3, x4, y4 = dt.tolist()
+ if int(num_id) < 0:
+ continue
+ category_id = label_to_cat_id_map[int(num_id)]
+ rbox = [x1, y1, x2, y2, x3, y3, x4, y4]
+ dt_res = {
+ 'image_id': cur_image_id,
+ 'category_id': category_id,
+ 'bbox': rbox,
+ 'score': score
+ }
+ det_res.append(dt_res)
+ return det_res
+
+
+def get_seg_res(masks, bboxes, mask_nums, image_id, label_to_cat_id_map):
+ import pycocotools.mask as mask_util
+ seg_res = []
+ k = 0
+ for i in range(len(mask_nums)):
+ cur_image_id = int(image_id[i][0])
+ det_nums = mask_nums[i]
+ for j in range(det_nums):
+ mask = masks[k].astype(np.uint8)
+ score = float(bboxes[k][1])
+ label = int(bboxes[k][0])
+ k = k + 1
+ if label == -1:
+ continue
+ cat_id = label_to_cat_id_map[label]
+ rle = mask_util.encode(
+ np.array(
+ mask[:, :, None], order="F", dtype="uint8"))[0]
+ if six.PY3:
+ if 'counts' in rle:
+ rle['counts'] = rle['counts'].decode("utf8")
+ sg_res = {
+ 'image_id': cur_image_id,
+ 'category_id': cat_id,
+ 'segmentation': rle,
+ 'score': score
+ }
+ seg_res.append(sg_res)
+ return seg_res
+
+
+def get_solov2_segm_res(results, image_id, num_id_to_cat_id_map):
+ import pycocotools.mask as mask_util
+ segm_res = []
+ # for each batch
+ segms = results['segm'].astype(np.uint8)
+ clsid_labels = results['cate_label']
+ clsid_scores = results['cate_score']
+ lengths = segms.shape[0]
+ im_id = int(image_id[0][0])
+ if lengths == 0 or segms is None:
+ return None
+ # for each sample
+ for i in range(lengths - 1):
+ clsid = int(clsid_labels[i])
+ catid = num_id_to_cat_id_map[clsid]
+ score = float(clsid_scores[i])
+ mask = segms[i]
+ segm = mask_util.encode(np.array(mask[:, :, np.newaxis], order='F'))[0]
+ segm['counts'] = segm['counts'].decode('utf8')
+ coco_res = {
+ 'image_id': im_id,
+ 'category_id': catid,
+ 'segmentation': segm,
+ 'score': score
+ }
+ segm_res.append(coco_res)
+ return segm_res
+
+
+def get_keypoint_res(results, im_id):
+ anns = []
+ preds = results['keypoint']
+ for idx in range(im_id.shape[0]):
+ image_id = im_id[idx].item()
+ kpts, scores = preds[idx]
+ for kpt, score in zip(kpts, scores):
+ kpt = kpt.flatten()
+ ann = {
+ 'image_id': image_id,
+ 'category_id': 1, # XXX hard code
+ 'keypoints': kpt.tolist(),
+ 'score': float(score)
+ }
+ x = kpt[0::3]
+ y = kpt[1::3]
+ x0, x1, y0, y1 = np.min(x).item(), np.max(x).item(), np.min(y).item(
+ ), np.max(y).item()
+ ann['area'] = (x1 - x0) * (y1 - y0)
+ ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
+ anns.append(ann)
+ return anns
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/keypoint_metrics.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/keypoint_metrics.py
new file mode 100644
index 000000000..d8bc0e782
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/keypoint_metrics.py
@@ -0,0 +1,402 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+from collections import defaultdict, OrderedDict
+import numpy as np
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+from ..modeling.keypoint_utils import oks_nms
+from scipy.io import loadmat, savemat
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = ['KeyPointTopDownCOCOEval', 'KeyPointTopDownMPIIEval']
+
+
+class KeyPointTopDownCOCOEval(object):
+ '''
+ Adapted from
+ https://github.com/leoxiaobin/deep-high-resolution-net.pytorch
+ Copyright (c) Microsoft, under the MIT License.
+ '''
+
+ def __init__(self,
+ anno_file,
+ num_samples,
+ num_joints,
+ output_eval,
+ iou_type='keypoints',
+ in_vis_thre=0.2,
+ oks_thre=0.9,
+ save_prediction_only=False):
+ super(KeyPointTopDownCOCOEval, self).__init__()
+ self.coco = COCO(anno_file)
+ self.num_samples = num_samples
+ self.num_joints = num_joints
+ self.iou_type = iou_type
+ self.in_vis_thre = in_vis_thre
+ self.oks_thre = oks_thre
+ self.output_eval = output_eval
+ self.res_file = os.path.join(output_eval, "keypoints_results.json")
+ self.save_prediction_only = save_prediction_only
+ self.reset()
+
+ def reset(self):
+ self.results = {
+ 'all_preds': np.zeros(
+ (self.num_samples, self.num_joints, 3), dtype=np.float32),
+ 'all_boxes': np.zeros((self.num_samples, 6)),
+ 'image_path': []
+ }
+ self.eval_results = {}
+ self.idx = 0
+
+ def update(self, inputs, outputs):
+ kpts, _ = outputs['keypoint'][0]
+
+ num_images = inputs['image'].shape[0]
+ self.results['all_preds'][self.idx:self.idx + num_images, :, 0:
+ 3] = kpts[:, :, 0:3]
+ self.results['all_boxes'][self.idx:self.idx + num_images, 0:2] = inputs[
+ 'center'].numpy()[:, 0:2]
+ self.results['all_boxes'][self.idx:self.idx + num_images, 2:4] = inputs[
+ 'scale'].numpy()[:, 0:2]
+ self.results['all_boxes'][self.idx:self.idx + num_images, 4] = np.prod(
+ inputs['scale'].numpy() * 200, 1)
+ self.results['all_boxes'][self.idx:self.idx + num_images,
+ 5] = np.squeeze(inputs['score'].numpy())
+ self.results['image_path'].extend(inputs['im_id'].numpy())
+
+ self.idx += num_images
+
+ def _write_coco_keypoint_results(self, keypoints):
+ data_pack = [{
+ 'cat_id': 1,
+ 'cls': 'person',
+ 'ann_type': 'keypoints',
+ 'keypoints': keypoints
+ }]
+ results = self._coco_keypoint_results_one_category_kernel(data_pack[0])
+ if not os.path.exists(self.output_eval):
+ os.makedirs(self.output_eval)
+ with open(self.res_file, 'w') as f:
+ json.dump(results, f, sort_keys=True, indent=4)
+ logger.info(f'The keypoint result is saved to {self.res_file}.')
+ try:
+ json.load(open(self.res_file))
+ except Exception:
+ content = []
+ with open(self.res_file, 'r') as f:
+ for line in f:
+ content.append(line)
+ content[-1] = ']'
+ with open(self.res_file, 'w') as f:
+ for c in content:
+ f.write(c)
+
+ def _coco_keypoint_results_one_category_kernel(self, data_pack):
+ cat_id = data_pack['cat_id']
+ keypoints = data_pack['keypoints']
+ cat_results = []
+
+ for img_kpts in keypoints:
+ if len(img_kpts) == 0:
+ continue
+
+ _key_points = np.array(
+ [img_kpts[k]['keypoints'] for k in range(len(img_kpts))])
+ _key_points = _key_points.reshape(_key_points.shape[0], -1)
+
+ result = [{
+ 'image_id': img_kpts[k]['image'],
+ 'category_id': cat_id,
+ 'keypoints': _key_points[k].tolist(),
+ 'score': img_kpts[k]['score'],
+ 'center': list(img_kpts[k]['center']),
+ 'scale': list(img_kpts[k]['scale'])
+ } for k in range(len(img_kpts))]
+ cat_results.extend(result)
+
+ return cat_results
+
+ def get_final_results(self, preds, all_boxes, img_path):
+ _kpts = []
+ for idx, kpt in enumerate(preds):
+ _kpts.append({
+ 'keypoints': kpt,
+ 'center': all_boxes[idx][0:2],
+ 'scale': all_boxes[idx][2:4],
+ 'area': all_boxes[idx][4],
+ 'score': all_boxes[idx][5],
+ 'image': int(img_path[idx])
+ })
+ # image x person x (keypoints)
+ kpts = defaultdict(list)
+ for kpt in _kpts:
+ kpts[kpt['image']].append(kpt)
+
+ # rescoring and oks nms
+ num_joints = preds.shape[1]
+ in_vis_thre = self.in_vis_thre
+ oks_thre = self.oks_thre
+ oks_nmsed_kpts = []
+ for img in kpts.keys():
+ img_kpts = kpts[img]
+ for n_p in img_kpts:
+ box_score = n_p['score']
+ kpt_score = 0
+ valid_num = 0
+ for n_jt in range(0, num_joints):
+ t_s = n_p['keypoints'][n_jt][2]
+ if t_s > in_vis_thre:
+ kpt_score = kpt_score + t_s
+ valid_num = valid_num + 1
+ if valid_num != 0:
+ kpt_score = kpt_score / valid_num
+ # rescoring
+ n_p['score'] = kpt_score * box_score
+
+ keep = oks_nms([img_kpts[i] for i in range(len(img_kpts))],
+ oks_thre)
+
+ if len(keep) == 0:
+ oks_nmsed_kpts.append(img_kpts)
+ else:
+ oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])
+
+ self._write_coco_keypoint_results(oks_nmsed_kpts)
+
+ def accumulate(self):
+ self.get_final_results(self.results['all_preds'],
+ self.results['all_boxes'],
+ self.results['image_path'])
+ if self.save_prediction_only:
+            logger.info(f'The keypoint result is saved to {self.res_file} '
+                        'and the mAP is not evaluated.')
+ return
+ coco_dt = self.coco.loadRes(self.res_file)
+ coco_eval = COCOeval(self.coco, coco_dt, 'keypoints')
+ coco_eval.params.useSegm = None
+ coco_eval.evaluate()
+ coco_eval.accumulate()
+ coco_eval.summarize()
+
+ keypoint_stats = []
+ for ind in range(len(coco_eval.stats)):
+ keypoint_stats.append((coco_eval.stats[ind]))
+ self.eval_results['keypoint'] = keypoint_stats
+
+ def log(self):
+ if self.save_prediction_only:
+ return
+ stats_names = [
+ 'AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5',
+ 'AR .75', 'AR (M)', 'AR (L)'
+ ]
+ num_values = len(stats_names)
+ print(' '.join(['| {}'.format(name) for name in stats_names]) + ' |')
+ print('|---' * (num_values + 1) + '|')
+
+ print(' '.join([
+ '| {:.3f}'.format(value) for value in self.eval_results['keypoint']
+ ]) + ' |')
+
+ def get_results(self):
+ return self.eval_results
+
+
+class KeyPointTopDownMPIIEval(object):
+ def __init__(self,
+ anno_file,
+ num_samples,
+ num_joints,
+ output_eval,
+ oks_thre=0.9,
+ save_prediction_only=False):
+ super(KeyPointTopDownMPIIEval, self).__init__()
+ self.ann_file = anno_file
+ self.res_file = os.path.join(output_eval, "keypoints_results.json")
+ self.save_prediction_only = save_prediction_only
+ self.reset()
+
+ def reset(self):
+ self.results = []
+ self.eval_results = {}
+ self.idx = 0
+
+ def update(self, inputs, outputs):
+ kpts, _ = outputs['keypoint'][0]
+
+ num_images = inputs['image'].shape[0]
+ results = {}
+ results['preds'] = kpts[:, :, 0:3]
+ results['boxes'] = np.zeros((num_images, 6))
+ results['boxes'][:, 0:2] = inputs['center'].numpy()[:, 0:2]
+ results['boxes'][:, 2:4] = inputs['scale'].numpy()[:, 0:2]
+ results['boxes'][:, 4] = np.prod(inputs['scale'].numpy() * 200, 1)
+ results['boxes'][:, 5] = np.squeeze(inputs['score'].numpy())
+ results['image_path'] = inputs['image_file']
+
+ self.results.append(results)
+
+ def accumulate(self):
+ self._mpii_keypoint_results_save()
+ if self.save_prediction_only:
+            logger.info(f'The keypoint result is saved to {self.res_file} '
+                        'and the mAP is not evaluated.')
+ return
+
+ self.eval_results = self.evaluate(self.results)
+
+ def _mpii_keypoint_results_save(self):
+ results = []
+ for res in self.results:
+ if len(res) == 0:
+ continue
+ result = [{
+ 'preds': res['preds'][k].tolist(),
+ 'boxes': res['boxes'][k].tolist(),
+ 'image_path': res['image_path'][k],
+ } for k in range(len(res))]
+ results.extend(result)
+ with open(self.res_file, 'w') as f:
+ json.dump(results, f, sort_keys=True, indent=4)
+ logger.info(f'The keypoint result is saved to {self.res_file}.')
+
+ def log(self):
+ if self.save_prediction_only:
+ return
+ for item, value in self.eval_results.items():
+ print("{} : {}".format(item, value))
+
+ def get_results(self):
+ return self.eval_results
+
+ def evaluate(self, outputs, savepath=None):
+ """Evaluate PCKh for MPII dataset. Adapted from
+ https://github.com/leoxiaobin/deep-high-resolution-net.pytorch
+ Copyright (c) Microsoft, under the MIT License.
+
+ Args:
+ outputs(list(preds, boxes)):
+
+ * preds (np.ndarray[N,K,3]): The first two dimensions are
+ coordinates, score is the third dimension of the array.
+ * boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]
+ , scale[1],area, score]
+
+ Returns:
+ dict: PCKh for each joint
+ """
+
+ kpts = []
+ for output in outputs:
+ preds = output['preds']
+ batch_size = preds.shape[0]
+ for i in range(batch_size):
+ kpts.append({'keypoints': preds[i]})
+
+ preds = np.stack([kpt['keypoints'] for kpt in kpts])
+
+ # convert 0-based index to 1-based index,
+ # and get the first two dimensions.
+ preds = preds[..., :2] + 1.0
+
+ if savepath is not None:
+ pred_file = os.path.join(savepath, 'pred.mat')
+ savemat(pred_file, mdict={'preds': preds})
+
+ SC_BIAS = 0.6
+ threshold = 0.5
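+        # PCKh: a joint is correct when its error is within threshold * head size, where head size = SC_BIAS * the head-box diagonal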
+
+ gt_file = os.path.join(
+ os.path.dirname(self.ann_file), 'mpii_gt_val.mat')
+ gt_dict = loadmat(gt_file)
+ dataset_joints = gt_dict['dataset_joints']
+ jnt_missing = gt_dict['jnt_missing']
+ pos_gt_src = gt_dict['pos_gt_src']
+ headboxes_src = gt_dict['headboxes_src']
+
+ pos_pred_src = np.transpose(preds, [1, 2, 0])
+
+ head = np.where(dataset_joints == 'head')[1][0]
+ lsho = np.where(dataset_joints == 'lsho')[1][0]
+ lelb = np.where(dataset_joints == 'lelb')[1][0]
+ lwri = np.where(dataset_joints == 'lwri')[1][0]
+ lhip = np.where(dataset_joints == 'lhip')[1][0]
+ lkne = np.where(dataset_joints == 'lkne')[1][0]
+ lank = np.where(dataset_joints == 'lank')[1][0]
+
+ rsho = np.where(dataset_joints == 'rsho')[1][0]
+ relb = np.where(dataset_joints == 'relb')[1][0]
+ rwri = np.where(dataset_joints == 'rwri')[1][0]
+ rkne = np.where(dataset_joints == 'rkne')[1][0]
+ rank = np.where(dataset_joints == 'rank')[1][0]
+ rhip = np.where(dataset_joints == 'rhip')[1][0]
+
+ jnt_visible = 1 - jnt_missing
+ uv_error = pos_pred_src - pos_gt_src
+ uv_err = np.linalg.norm(uv_error, axis=1)
+ headsizes = headboxes_src[1, :, :] - headboxes_src[0, :, :]
+ headsizes = np.linalg.norm(headsizes, axis=0)
+ headsizes *= SC_BIAS
+ scale = headsizes * np.ones((len(uv_err), 1), dtype=np.float32)
+ scaled_uv_err = uv_err / scale
+ scaled_uv_err = scaled_uv_err * jnt_visible
+ jnt_count = np.sum(jnt_visible, axis=1)
+ less_than_threshold = (scaled_uv_err <= threshold) * jnt_visible
+ PCKh = 100. * np.sum(less_than_threshold, axis=1) / jnt_count
+
+ # save
+ rng = np.arange(0, 0.5 + 0.01, 0.01)
+ pckAll = np.zeros((len(rng), 16), dtype=np.float32)
+
+ for r, threshold in enumerate(rng):
+ less_than_threshold = (scaled_uv_err <= threshold) * jnt_visible
+ pckAll[r, :] = 100. * np.sum(less_than_threshold,
+ axis=1) / jnt_count
+
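+        # mask out pelvis (6) and thorax (7): MPII PCKh does not report them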
+ PCKh = np.ma.array(PCKh, mask=False)
+ PCKh.mask[6:8] = True
+
+ jnt_count = np.ma.array(jnt_count, mask=False)
+ jnt_count.mask[6:8] = True
+ jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64)
+
+ name_value = [ #noqa
+ ('Head', PCKh[head]),
+ ('Shoulder', 0.5 * (PCKh[lsho] + PCKh[rsho])),
+ ('Elbow', 0.5 * (PCKh[lelb] + PCKh[relb])),
+ ('Wrist', 0.5 * (PCKh[lwri] + PCKh[rwri])),
+ ('Hip', 0.5 * (PCKh[lhip] + PCKh[rhip])),
+ ('Knee', 0.5 * (PCKh[lkne] + PCKh[rkne])),
+ ('Ankle', 0.5 * (PCKh[lank] + PCKh[rank])),
+ ('PCKh', np.sum(PCKh * jnt_ratio)),
+ ('PCKh@0.1', np.sum(pckAll[11, :] * jnt_ratio))
+ ]
+ name_value = OrderedDict(name_value)
+
+ return name_value
+
+ def _sort_and_unique_bboxes(self, kpts, key='bbox_id'):
+ """sort kpts and remove the repeated ones."""
+ kpts = sorted(kpts, key=lambda x: x[key])
+ num = len(kpts)
+ for i in range(num - 1, 0, -1):
+ if kpts[i][key] == kpts[i - 1][key]:
+ del kpts[i]
+
+ return kpts
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/map_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/map_utils.py
new file mode 100644
index 000000000..9c96b9235
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/map_utils.py
@@ -0,0 +1,443 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import sys
+import numpy as np
+import itertools
+import paddle
+from ppdet.modeling.bbox_utils import poly2rbox, rbox2poly_np
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = [
+ 'draw_pr_curve',
+ 'bbox_area',
+ 'jaccard_overlap',
+ 'prune_zero_padding',
+ 'DetectionMAP',
+ 'ap_per_class',
+ 'compute_ap',
+]
+
+
+def draw_pr_curve(precision,
+ recall,
+ iou=0.5,
+ out_dir='pr_curve',
+ file_name='precision_recall_curve.jpg'):
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ output_path = os.path.join(out_dir, file_name)
+ try:
+ import matplotlib.pyplot as plt
+ except Exception as e:
+        logger.error('Matplotlib not found, please install matplotlib, '
+                     'for example: `pip install matplotlib`.')
+ raise e
+ plt.cla()
+ plt.figure('P-R Curve')
+ plt.title('Precision/Recall Curve(IoU={})'.format(iou))
+ plt.xlabel('Recall')
+ plt.ylabel('Precision')
+ plt.grid(True)
+ plt.plot(recall, precision)
+ plt.savefig(output_path)
+
+
+def bbox_area(bbox, is_bbox_normalized):
+ """
+ Calculate area of a bounding box
+ """
+ norm = 1. - float(is_bbox_normalized)
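+    # pixel-coordinate boxes are inclusive, so add 1 to width/height; normalized boxes add 0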
+ width = bbox[2] - bbox[0] + norm
+ height = bbox[3] - bbox[1] + norm
+ return width * height
+
+
+def jaccard_overlap(pred, gt, is_bbox_normalized=False):
+ """
+ Calculate jaccard overlap ratio between two bounding box
+ """
+ if pred[0] >= gt[2] or pred[2] <= gt[0] or \
+ pred[1] >= gt[3] or pred[3] <= gt[1]:
+ return 0.
+ inter_xmin = max(pred[0], gt[0])
+ inter_ymin = max(pred[1], gt[1])
+ inter_xmax = min(pred[2], gt[2])
+ inter_ymax = min(pred[3], gt[3])
+ inter_size = bbox_area([inter_xmin, inter_ymin, inter_xmax, inter_ymax],
+ is_bbox_normalized)
+ pred_size = bbox_area(pred, is_bbox_normalized)
+ gt_size = bbox_area(gt, is_bbox_normalized)
+ overlap = float(inter_size) / (pred_size + gt_size - inter_size)
+ return overlap
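+
+# Illustrative check (not part of the original file): with normalized boxes
+# pred = [0, 0, 2, 2] and gt = [1, 1, 3, 3], the intersection is the unit
+# square [1, 1, 2, 2], so jaccard_overlap(pred, gt, True) == 1 / 7 ~= 0.143;
+# with is_bbox_normalized=False the inclusive-pixel areas give
+# 4 / (9 + 9 - 4) ~= 0.286.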
+
+
+def calc_rbox_iou(pred, gt_rbox):
+ """
+    Calculate IoU between rotated bboxes
+ """
+ # calc iou of bounding box for speedup
+ pred = np.array(pred, np.float32).reshape(-1, 8)
+ pred = pred.reshape(-1, 2)
+ gt_poly = rbox2poly_np(np.array(gt_rbox).reshape(-1, 5))[0]
+ gt_poly = gt_poly.reshape(-1, 2)
+ pred_rect = [
+ np.min(pred[:, 0]), np.min(pred[:, 1]), np.max(pred[:, 0]),
+ np.max(pred[:, 1])
+ ]
+ gt_rect = [
+ np.min(gt_poly[:, 0]), np.min(gt_poly[:, 1]), np.max(gt_poly[:, 0]),
+ np.max(gt_poly[:, 1])
+ ]
+ iou = jaccard_overlap(pred_rect, gt_rect, False)
+
+ if iou <= 0:
+ return iou
+
+ # calc rbox iou
+    pred = np.array(pred, np.float32).reshape(-1, 8)
+    pred_rbox = poly2rbox(pred)
+    pred_rbox = pred_rbox.reshape(-1, 5)
+ try:
+ from rbox_iou_ops import rbox_iou
+ except Exception as e:
+ print("import custom_ops error, try install rbox_iou_ops " \
+ "following ppdet/ext_op/README.md", e)
+ sys.stdout.flush()
+ sys.exit(-1)
+ gt_rbox = np.array(gt_rbox, np.float32).reshape(-1, 5)
+ pd_gt_rbox = paddle.to_tensor(gt_rbox, dtype='float32')
+ pd_pred_rbox = paddle.to_tensor(pred_rbox, dtype='float32')
+ iou = rbox_iou(pd_gt_rbox, pd_pred_rbox)
+ iou = iou.numpy()
+ return iou[0][0]
+
+
+def prune_zero_padding(gt_box, gt_label, difficult=None):
+ valid_cnt = 0
+ for i in range(len(gt_box)):
+ if gt_box[i, 0] == 0 and gt_box[i, 1] == 0 and \
+ gt_box[i, 2] == 0 and gt_box[i, 3] == 0:
+ break
+ valid_cnt += 1
+ return (gt_box[:valid_cnt], gt_label[:valid_cnt], difficult[:valid_cnt]
+ if difficult is not None else None)
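+
+# Illustrative check (not part of the original file): padding rows are
+# all-zero boxes appended after the real ground truths, so with
+# gt_box = np.array([[1, 2, 3, 4], [0, 0, 0, 0]]) and gt_label = np.array([5, 0]),
+# prune_zero_padding(gt_box, gt_label) returns (gt_box[:1], gt_label[:1], None).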
+
+
+class DetectionMAP(object):
+ """
+ Calculate detection mean average precision.
+ Currently support two types: 11point and integral
+
+ Args:
+ class_num (int): The class number.
+ overlap_thresh (float): The threshold of overlap
+ ratio between prediction bounding box and
+ ground truth bounding box for deciding
+ true/false positive. Default 0.5.
+ map_type (str): Calculation method of mean average
+ precision, currently support '11point' and
+ 'integral'. Default '11point'.
+        is_bbox_normalized (bool): Whether bounding boxes
+            are normalized to the range [0, 1]. Default False.
+ evaluate_difficult (bool): Whether to evaluate
+ difficult bounding boxes. Default False.
+ catid2name (dict): Mapping between category id and category name.
+        classwise (bool): Whether to compute per-category AP
+            and draw the P-R curve.
+ """
+
+ def __init__(self,
+ class_num,
+ overlap_thresh=0.5,
+ map_type='11point',
+ is_bbox_normalized=False,
+ evaluate_difficult=False,
+ catid2name=None,
+ classwise=False):
+ self.class_num = class_num
+ self.overlap_thresh = overlap_thresh
+ assert map_type in ['11point', 'integral'], \
+ "map_type currently only support '11point' "\
+ "and 'integral'"
+ self.map_type = map_type
+ self.is_bbox_normalized = is_bbox_normalized
+ self.evaluate_difficult = evaluate_difficult
+ self.classwise = classwise
+ self.classes = []
+ for cname in catid2name.values():
+ self.classes.append(cname)
+ self.reset()
+
+ def update(self, bbox, score, label, gt_box, gt_label, difficult=None):
+ """
+        Update metric statistics from the given prediction and
+        ground truth information.
+ """
+ if difficult is None:
+ difficult = np.zeros_like(gt_label)
+
+ # record class gt count
+ for gtl, diff in zip(gt_label, difficult):
+ if self.evaluate_difficult or int(diff) == 0:
+ self.class_gt_counts[int(np.array(gtl))] += 1
+
+ # record class score positive
+ visited = [False] * len(gt_label)
+ for b, s, l in zip(bbox, score, label):
+ pred = b.tolist() if isinstance(b, np.ndarray) else b
+ max_idx = -1
+ max_overlap = -1.0
+ for i, gl in enumerate(gt_label):
+ if int(gl) == int(l):
+ if len(gt_box[i]) == 5:
+ overlap = calc_rbox_iou(pred, gt_box[i])
+ else:
+ overlap = jaccard_overlap(pred, gt_box[i],
+ self.is_bbox_normalized)
+ if overlap > max_overlap:
+ max_overlap = overlap
+ max_idx = i
+
+ if max_overlap > self.overlap_thresh:
+ if self.evaluate_difficult or \
+ int(np.array(difficult[max_idx])) == 0:
+ if not visited[max_idx]:
+ self.class_score_poss[int(l)].append([s, 1.0])
+ visited[max_idx] = True
+ else:
+ self.class_score_poss[int(l)].append([s, 0.0])
+ else:
+ self.class_score_poss[int(l)].append([s, 0.0])
+
+ def reset(self):
+ """
+        Reset metric statistics
+ """
+ self.class_score_poss = [[] for _ in range(self.class_num)]
+ self.class_gt_counts = [0] * self.class_num
+ self.mAP = 0.0
+
+ def accumulate(self):
+ """
+ Accumulate metric results and calculate mAP
+ """
+ mAP = 0.
+ valid_cnt = 0
+ eval_results = []
+ for score_pos, count in zip(self.class_score_poss,
+ self.class_gt_counts):
+ if count == 0: continue
+ if len(score_pos) == 0:
+ valid_cnt += 1
+ continue
+
+ accum_tp_list, accum_fp_list = \
+ self._get_tp_fp_accum(score_pos)
+ precision = []
+ recall = []
+ for ac_tp, ac_fp in zip(accum_tp_list, accum_fp_list):
+ precision.append(float(ac_tp) / (ac_tp + ac_fp))
+ recall.append(float(ac_tp) / count)
+
+ one_class_ap = 0.0
+ if self.map_type == '11point':
+ max_precisions = [0.] * 11
+ start_idx = len(precision) - 1
+ for j in range(10, -1, -1):
+ for i in range(start_idx, -1, -1):
+ if recall[i] < float(j) / 10.:
+ start_idx = i
+ if j > 0:
+ max_precisions[j - 1] = max_precisions[j]
+ break
+ else:
+ if max_precisions[j] < precision[i]:
+ max_precisions[j] = precision[i]
+ one_class_ap = sum(max_precisions) / 11.
+ mAP += one_class_ap
+ valid_cnt += 1
+ elif self.map_type == 'integral':
+ import math
+ prev_recall = 0.
+ for i in range(len(precision)):
+ recall_gap = math.fabs(recall[i] - prev_recall)
+ if recall_gap > 1e-6:
+ one_class_ap += precision[i] * recall_gap
+ prev_recall = recall[i]
+ mAP += one_class_ap
+ valid_cnt += 1
+ else:
+ logger.error("Unspported mAP type {}".format(self.map_type))
+ sys.exit(1)
+ eval_results.append({
+ 'class': self.classes[valid_cnt - 1],
+ 'ap': one_class_ap,
+ 'precision': precision,
+ 'recall': recall,
+ })
+ self.eval_results = eval_results
+ self.mAP = mAP / float(valid_cnt) if valid_cnt > 0 else mAP
+
+ def get_map(self):
+ """
+ Get mAP result
+ """
+ if self.mAP is None:
+ logger.error("mAP is not calculated.")
+ if self.classwise:
+ # Compute per-category AP and PR curve
+ try:
+ from terminaltables import AsciiTable
+ except Exception as e:
+ logger.error(
+                    'terminaltables not found, please install terminaltables, '
+                    'for example: `pip install terminaltables`.')
+ raise e
+ results_per_category = []
+ for eval_result in self.eval_results:
+ results_per_category.append(
+ (str(eval_result['class']),
+ '{:0.3f}'.format(float(eval_result['ap']))))
+ draw_pr_curve(
+ eval_result['precision'],
+ eval_result['recall'],
+ out_dir='voc_pr_curve',
+ file_name='{}_precision_recall_curve.jpg'.format(
+ eval_result['class']))
+
+ num_columns = min(6, len(results_per_category) * 2)
+ results_flatten = list(itertools.chain(*results_per_category))
+ headers = ['category', 'AP'] * (num_columns // 2)
+ results_2d = itertools.zip_longest(
+ *[results_flatten[i::num_columns] for i in range(num_columns)])
+ table_data = [headers]
+ table_data += [result for result in results_2d]
+ table = AsciiTable(table_data)
+        logger.info('Per-category VOC AP: \n{}'.format(table.table))
+        logger.info(
+            "Per-category PR curves have been saved to the voc_pr_curve folder.")
+ return self.mAP
+
+ def _get_tp_fp_accum(self, score_pos_list):
+ """
+        Calculate cumulative true/false positive counts from
+        [score, pos] records
+ """
+ sorted_list = sorted(score_pos_list, key=lambda s: s[0], reverse=True)
+ accum_tp = 0
+ accum_fp = 0
+ accum_tp_list = []
+ accum_fp_list = []
+ for (score, pos) in sorted_list:
+ accum_tp += int(pos)
+ accum_tp_list.append(accum_tp)
+ accum_fp += 1 - int(pos)
+ accum_fp_list.append(accum_fp)
+ return accum_tp_list, accum_fp_list
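+
+    # Illustrative check (not part of the original file): [score, pos] records
+    # [[0.9, 1.], [0.8, 0.], [0.7, 1.]] are already sorted by score and yield
+    # accum_tp = [1, 1, 2] and accum_fp = [0, 1, 1], i.e. precision
+    # [1.0, 0.5, 2/3] and, with count = 2 GT boxes, recall [0.5, 0.5, 1.0]
+    # in accumulate() above.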
+
+
+def ap_per_class(tp, conf, pred_cls, target_cls):
+ """
+ Computes the average precision, given the recall and precision curves.
+ Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
+
+ Args:
+ tp (list): True positives.
+ conf (list): Objectness value from 0-1.
+ pred_cls (list): Predicted object classes.
+ target_cls (list): Target object classes.
+ """
+ tp, conf, pred_cls, target_cls = np.array(tp), np.array(conf), np.array(
+ pred_cls), np.array(target_cls)
+
+ # Sort by objectness
+ i = np.argsort(-conf)
+ tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
+
+ # Find unique classes
+ unique_classes = np.unique(np.concatenate((pred_cls, target_cls), 0))
+
+ # Create Precision-Recall curve and compute AP for each class
+ ap, p, r = [], [], []
+ for c in unique_classes:
+ i = pred_cls == c
+ n_gt = sum(target_cls == c) # Number of ground truth objects
+ n_p = sum(i) # Number of predicted objects
+
+ if (n_p == 0) and (n_gt == 0):
+ continue
+ elif (n_p == 0) or (n_gt == 0):
+ ap.append(0)
+ r.append(0)
+ p.append(0)
+ else:
+ # Accumulate FPs and TPs
+ fpc = np.cumsum(1 - tp[i])
+ tpc = np.cumsum(tp[i])
+
+ # Recall
+ recall_curve = tpc / (n_gt + 1e-16)
+ r.append(tpc[-1] / (n_gt + 1e-16))
+
+ # Precision
+ precision_curve = tpc / (tpc + fpc)
+ p.append(tpc[-1] / (tpc[-1] + fpc[-1]))
+
+ # AP from recall-precision curve
+ ap.append(compute_ap(recall_curve, precision_curve))
+
+ return np.array(ap), unique_classes.astype('int32'), np.array(r), np.array(
+ p)
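+
+# Illustrative check (not part of the original file): for a single class 0
+# with two GT boxes, tp=[1, 0, 1], conf=[0.9, 0.8, 0.7], pred_cls=[0, 0, 0],
+# target_cls=[0, 0] gives recall_curve [0.5, 0.5, 1.0] and precision_curve
+# [1.0, 0.5, 2/3], so r = [1.0], p = [2/3], and ap = [compute_ap(...)] ~= [0.833].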
+
+
+def compute_ap(recall, precision):
+ """
+ Computes the average precision, given the recall and precision curves.
+ Code originally from https://github.com/rbgirshick/py-faster-rcnn.
+
+ Args:
+ recall (list): The recall curve.
+ precision (list): The precision curve.
+
+ Returns:
+ The average precision as computed in py-faster-rcnn.
+ """
+ # correct AP calculation
+ # first append sentinel values at the end
+ mrec = np.concatenate(([0.], recall, [1.]))
+ mpre = np.concatenate(([0.], precision, [0.]))
+
+ # compute the precision envelope
+ for i in range(mpre.size - 1, 0, -1):
+ mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
+
+ # to calculate area under PR curve, look for points
+ # where X axis (recall) changes value
+ i = np.where(mrec[1:] != mrec[:-1])[0]
+
+ # and sum (\Delta recall) * prec
+ ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
+ return ap
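+
+# Illustrative check (not part of the original file): continuing the example
+# above, compute_ap([0.5, 0.5, 1.0], [1.0, 0.5, 2. / 3.]) pads to
+# mrec = [0, .5, .5, 1, 1], takes the running-max precision envelope
+# [1, 1, 2/3, 2/3, 0], and sums over the recall steps:
+# 0.5 * 1.0 + 0.5 * 2/3 = 5/6 ~= 0.833.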
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/mcmot_metrics.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/mcmot_metrics.py
new file mode 100644
index 000000000..9f329c8e0
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/mcmot_metrics.py
@@ -0,0 +1,467 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import copy
+import sys
+import math
+from collections import defaultdict
+from motmetrics.math_util import quiet_divide
+
+import numpy as np
+import pandas as pd
+
+import paddle
+import paddle.nn.functional as F
+from .metrics import Metric
+import motmetrics as mm
+import openpyxl
+metrics = mm.metrics.motchallenge_metrics
+mh = mm.metrics.create()
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = ['MCMOTEvaluator', 'MCMOTMetric']
+
+METRICS_LIST = [
+ 'num_frames', 'num_matches', 'num_switches', 'num_transfer', 'num_ascend',
+ 'num_migrate', 'num_false_positives', 'num_misses', 'num_detections',
+ 'num_objects', 'num_predictions', 'num_unique_objects', 'mostly_tracked',
+ 'partially_tracked', 'mostly_lost', 'num_fragmentations', 'motp', 'mota',
+ 'precision', 'recall', 'idfp', 'idfn', 'idtp', 'idp', 'idr', 'idf1'
+]
+
+NAME_MAP = {
+ 'num_frames': 'num_frames',
+ 'num_matches': 'num_matches',
+ 'num_switches': 'IDs',
+ 'num_transfer': 'IDt',
+ 'num_ascend': 'IDa',
+ 'num_migrate': 'IDm',
+ 'num_false_positives': 'FP',
+ 'num_misses': 'FN',
+ 'num_detections': 'num_detections',
+ 'num_objects': 'num_objects',
+ 'num_predictions': 'num_predictions',
+ 'num_unique_objects': 'GT',
+ 'mostly_tracked': 'MT',
+ 'partially_tracked': 'partially_tracked',
+ 'mostly_lost': 'ML',
+ 'num_fragmentations': 'FM',
+ 'motp': 'MOTP',
+ 'mota': 'MOTA',
+ 'precision': 'Prcn',
+ 'recall': 'Rcll',
+ 'idfp': 'idfp',
+ 'idfn': 'idfn',
+ 'idtp': 'idtp',
+ 'idp': 'IDP',
+ 'idr': 'IDR',
+ 'idf1': 'IDF1'
+}
+
+
+def parse_accs_metrics(seq_acc, index_name, verbose=False):
+ """
+ Parse the evaluation indicators of multiple MOTAccumulator
+ """
+ mh = mm.metrics.create()
+ summary = MCMOTEvaluator.get_summary(seq_acc, index_name, METRICS_LIST)
+ summary.loc['OVERALL', 'motp'] = (summary['motp'] * summary['num_detections']).sum() / \
+ summary.loc['OVERALL', 'num_detections']
+ if verbose:
+ strsummary = mm.io.render_summary(
+ summary, formatters=mh.formatters, namemap=NAME_MAP)
+ print(strsummary)
+
+ return summary
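+
+# Note (illustrative, not part of the original file): the OVERALL 'motp' entry
+# is recomputed above as a detection-weighted mean of the per-class values;
+# e.g. two classes with motp 0.2 / 0.4 and 10 / 30 detections give
+# (0.2 * 10 + 0.4 * 30) / 40 = 0.35.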
+
+
+def seqs_overall_metrics(summary_df, verbose=False):
+ """
+ Calculate overall metrics for multiple sequences
+ """
+ add_col = [
+ 'num_frames', 'num_matches', 'num_switches', 'num_transfer',
+ 'num_ascend', 'num_migrate', 'num_false_positives', 'num_misses',
+ 'num_detections', 'num_objects', 'num_predictions',
+ 'num_unique_objects', 'mostly_tracked', 'partially_tracked',
+ 'mostly_lost', 'num_fragmentations', 'idfp', 'idfn', 'idtp'
+ ]
+ calc_col = ['motp', 'mota', 'precision', 'recall', 'idp', 'idr', 'idf1']
+ calc_df = summary_df.copy()
+
+ overall_dic = {}
+ for col in add_col:
+ overall_dic[col] = calc_df[col].sum()
+
+ for col in calc_col:
+ overall_dic[col] = getattr(MCMOTMetricOverall, col + '_overall')(
+ calc_df, overall_dic)
+
+ overall_df = pd.DataFrame(overall_dic, index=['overall_calc'])
+ calc_df = pd.concat([calc_df, overall_df])
+
+ if verbose:
+ mh = mm.metrics.create()
+ str_calc_df = mm.io.render_summary(
+ calc_df, formatters=mh.formatters, namemap=NAME_MAP)
+ print(str_calc_df)
+
+ return calc_df
+
+
+class MCMOTMetricOverall(object):
+ def motp_overall(summary_df, overall_dic):
+ motp = quiet_divide((summary_df['motp'] *
+ summary_df['num_detections']).sum(),
+ overall_dic['num_detections'])
+ return motp
+
+ def mota_overall(summary_df, overall_dic):
+ del summary_df
+ mota = 1. - quiet_divide(
+ (overall_dic['num_misses'] + overall_dic['num_switches'] +
+ overall_dic['num_false_positives']), overall_dic['num_objects'])
+ return mota
+
+ def precision_overall(summary_df, overall_dic):
+ del summary_df
+ precision = quiet_divide(overall_dic['num_detections'], (
+ overall_dic['num_false_positives'] + overall_dic['num_detections']))
+ return precision
+
+ def recall_overall(summary_df, overall_dic):
+ del summary_df
+ recall = quiet_divide(overall_dic['num_detections'],
+ overall_dic['num_objects'])
+ return recall
+
+ def idp_overall(summary_df, overall_dic):
+ del summary_df
+ idp = quiet_divide(overall_dic['idtp'],
+ (overall_dic['idtp'] + overall_dic['idfp']))
+ return idp
+
+ def idr_overall(summary_df, overall_dic):
+ del summary_df
+ idr = quiet_divide(overall_dic['idtp'],
+ (overall_dic['idtp'] + overall_dic['idfn']))
+ return idr
+
+ def idf1_overall(summary_df, overall_dic):
+ del summary_df
+ idf1 = quiet_divide(2. * overall_dic['idtp'], (
+ overall_dic['num_objects'] + overall_dic['num_predictions']))
+ return idf1
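+
+
+# Note (illustrative, not part of the original file): these *_overall helpers
+# recompute ratio metrics from the summed counts rather than averaging
+# per-sequence ratios; e.g. idtp=80, num_objects=100, num_predictions=120
+# gives idf1 = 2 * 80 / (100 + 120) ~= 0.727.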
+
+
+def read_mcmot_results_union(filename, is_gt, is_ignore):
+ results_dict = dict()
+ if os.path.isfile(filename):
+ all_result = np.loadtxt(filename, delimiter=',')
+ if all_result.shape[0] == 0 or all_result.shape[1] < 7:
+ return results_dict
+ if is_ignore:
+ return results_dict
+ if is_gt:
+ # only for test use
+ all_result = all_result[all_result[:, 7] != 0]
+ all_result[:, 7] = all_result[:, 7] - 1
+
+ if all_result.shape[0] == 0:
+ return results_dict
+
+ class_unique = np.unique(all_result[:, 7])
+
+ last_max_id = 0
+ result_cls_list = []
+ for cls in class_unique:
+ result_cls_split = all_result[all_result[:, 7] == cls]
+ result_cls_split[:, 1] = result_cls_split[:, 1] + last_max_id
+            # make sure track ids differ across categories
+ last_max_id = max(np.unique(result_cls_split[:, 1])) + 1
+ result_cls_list.append(result_cls_split)
+
+ results_con = np.concatenate(result_cls_list)
+
+ for line in range(len(results_con)):
+ linelist = results_con[line]
+ fid = int(linelist[0])
+ if fid < 1:
+ continue
+ results_dict.setdefault(fid, list())
+
+ if is_gt:
+ score = 1
+ else:
+ score = float(linelist[6])
+
+ tlwh = tuple(map(float, linelist[2:6]))
+ target_id = int(linelist[1])
+ cls = int(linelist[7])
+
+ results_dict[fid].append((tlwh, target_id, cls, score))
+
+ return results_dict
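+
+# Illustrative input row (MOT-style CSV, not part of the original file):
+# "1,3,100,200,50,80,0.9,2" parses as frame=1, track id 3, tlwh
+# (100., 200., 50., 80.), score 0.9, class 2; track ids are offset per class
+# above so they stay unique across categories in the unioned evaluation.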
+
+
+def read_mcmot_results(filename, is_gt, is_ignore):
+ results_dict = dict()
+ if os.path.isfile(filename):
+ with open(filename, 'r') as f:
+ for line in f.readlines():
+ linelist = line.strip().split(',')
+ if len(linelist) < 7:
+ continue
+ fid = int(linelist[0])
+ if fid < 1:
+ continue
+ cid = int(linelist[7])
+ if is_gt:
+ score = 1
+ # only for test use
+ cid -= 1
+ else:
+ score = float(linelist[6])
+
+ cls_result_dict = results_dict.setdefault(cid, dict())
+ cls_result_dict.setdefault(fid, list())
+
+ tlwh = tuple(map(float, linelist[2:6]))
+ target_id = int(linelist[1])
+ cls_result_dict[fid].append((tlwh, target_id, score))
+ return results_dict
+
+
+def read_results(filename,
+ data_type,
+ is_gt=False,
+ is_ignore=False,
+ multi_class=False,
+ union=False):
+ if data_type in ['mcmot', 'lab']:
+ if multi_class:
+ if union:
+                # The results are evaluated by taking the union of all categories.
+ # Track IDs between different categories cannot be duplicate.
+ read_fun = read_mcmot_results_union
+ else:
+ # The results are evaluated separately by category.
+ read_fun = read_mcmot_results
+ else:
+ raise ValueError('multi_class: {}, MCMOT should have cls_id.'.
+ format(multi_class))
+ else:
+ raise ValueError('Unknown data type: {}'.format(data_type))
+
+ return read_fun(filename, is_gt, is_ignore)
+
+
+def unzip_objs(objs):
+ if len(objs) > 0:
+ tlwhs, ids, scores = zip(*objs)
+ else:
+ tlwhs, ids, scores = [], [], []
+ tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
+ return tlwhs, ids, scores
+
+
+def unzip_objs_cls(objs):
+ if len(objs) > 0:
+ tlwhs, ids, cls, scores = zip(*objs)
+ else:
+ tlwhs, ids, cls, scores = [], [], [], []
+ tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
+ ids = np.array(ids)
+ cls = np.array(cls)
+ scores = np.array(scores)
+ return tlwhs, ids, cls, scores
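+
+# Illustrative check (not part of the original file): unzip_objs_cls on
+# [((1., 2., 3., 4.), 7, 0, 0.9)] returns a (1, 4) tlwh array and the
+# id/class/score arrays [7], [0], [0.9]; an empty list yields empty arrays.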
+
+
+class MCMOTEvaluator(object):
+ def __init__(self, data_root, seq_name, data_type, num_classes):
+ self.data_root = data_root
+ self.seq_name = seq_name
+ self.data_type = data_type
+ self.num_classes = num_classes
+
+ self.load_annotations()
+ self.reset_accumulator()
+
+ self.class_accs = []
+
+ def load_annotations(self):
+ assert self.data_type == 'mcmot'
+ self.gt_filename = os.path.join(self.data_root, '../', '../',
+ 'sequences',
+ '{}.txt'.format(self.seq_name))
+
+ def reset_accumulator(self):
+ import motmetrics as mm
+ mm.lap.default_solver = 'lap'
+ self.acc = mm.MOTAccumulator(auto_id=True)
+
+ def eval_frame_dict(self, trk_objs, gt_objs, rtn_events=False, union=False):
+ import motmetrics as mm
+ mm.lap.default_solver = 'lap'
+ if union:
+ trk_tlwhs, trk_ids, trk_cls = unzip_objs_cls(trk_objs)[:3]
+ gt_tlwhs, gt_ids, gt_cls = unzip_objs_cls(gt_objs)[:3]
+
+ # get distance matrix
+ iou_distance = mm.distances.iou_matrix(
+ gt_tlwhs, trk_tlwhs, max_iou=0.5)
+
+ # Set the distance between objects of different categories to nan
+ gt_cls_len = len(gt_cls)
+ trk_cls_len = len(trk_cls)
+ # When the number of GT or Trk is 0, iou_distance dimension is (0,0)
+ if gt_cls_len != 0 and trk_cls_len != 0:
+ gt_cls = gt_cls.reshape(gt_cls_len, 1)
+ gt_cls = np.repeat(gt_cls, trk_cls_len, axis=1)
+ trk_cls = trk_cls.reshape(1, trk_cls_len)
+ trk_cls = np.repeat(trk_cls, gt_cls_len, axis=0)
+ iou_distance = np.where(gt_cls == trk_cls, iou_distance, np.nan)
+
+ else:
+ trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
+ gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
+
+ # get distance matrix
+ iou_distance = mm.distances.iou_matrix(
+ gt_tlwhs, trk_tlwhs, max_iou=0.5)
+
+ self.acc.update(gt_ids, trk_ids, iou_distance)
+
+ if rtn_events and iou_distance.size > 0 and hasattr(self.acc,
+ 'mot_events'):
+ events = self.acc.mot_events # only supported by https://github.com/longcw/py-motmetrics
+ else:
+ events = None
+ return events
+
+ def eval_file(self, result_filename):
+ # evaluation of each category
+ gt_frame_dict = read_results(
+ self.gt_filename,
+ self.data_type,
+ is_gt=True,
+ multi_class=True,
+ union=False)
+ result_frame_dict = read_results(
+ result_filename,
+ self.data_type,
+ is_gt=False,
+ multi_class=True,
+ union=False)
+
+ for cid in range(self.num_classes):
+ self.reset_accumulator()
+ cls_result_frame_dict = result_frame_dict.setdefault(cid, dict())
+ cls_gt_frame_dict = gt_frame_dict.setdefault(cid, dict())
+
+ # only labeled frames will be evaluated
+ frames = sorted(list(set(cls_gt_frame_dict.keys())))
+
+ for frame_id in frames:
+ trk_objs = cls_result_frame_dict.get(frame_id, [])
+ gt_objs = cls_gt_frame_dict.get(frame_id, [])
+ self.eval_frame_dict(trk_objs, gt_objs, rtn_events=False)
+
+ self.class_accs.append(self.acc)
+
+ return self.class_accs
+
+ @staticmethod
+ def get_summary(accs,
+ names,
+ metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1',
+ 'precision', 'recall')):
+ import motmetrics as mm
+ mm.lap.default_solver = 'lap'
+
+ names = copy.deepcopy(names)
+ if metrics is None:
+ metrics = mm.metrics.motchallenge_metrics
+ metrics = copy.deepcopy(metrics)
+
+ mh = mm.metrics.create()
+ summary = mh.compute_many(
+ accs, metrics=metrics, names=names, generate_overall=True)
+
+ return summary
+
+ @staticmethod
+ def save_summary(summary, filename):
+ import pandas as pd
+ writer = pd.ExcelWriter(filename)
+ summary.to_excel(writer)
+ writer.save()
+
+
+class MCMOTMetric(Metric):
+ def __init__(self, num_classes, save_summary=False):
+ self.num_classes = num_classes
+ self.save_summary = save_summary
+ self.MCMOTEvaluator = MCMOTEvaluator
+ self.result_root = None
+ self.reset()
+
+ self.seqs_overall = defaultdict(list)
+
+ def reset(self):
+ self.accs = []
+ self.seqs = []
+
+ def update(self, data_root, seq, data_type, result_root, result_filename):
+ evaluator = self.MCMOTEvaluator(data_root, seq, data_type,
+ self.num_classes)
+ seq_acc = evaluator.eval_file(result_filename)
+ self.accs.append(seq_acc)
+ self.seqs.append(seq)
+ self.result_root = result_root
+
+ cls_index_name = [
+ '{}_{}'.format(seq, i) for i in range(self.num_classes)
+ ]
+ summary = parse_accs_metrics(seq_acc, cls_index_name)
+ summary.rename(
+ index={'OVERALL': '{}_OVERALL'.format(seq)}, inplace=True)
+ for row in range(len(summary)):
+ self.seqs_overall[row].append(summary.iloc[row:row + 1])
+
+ def accumulate(self):
+ self.cls_summary_list = []
+ for row in range(self.num_classes):
+ seqs_cls_df = pd.concat(self.seqs_overall[row])
+ seqs_cls_summary = seqs_overall_metrics(seqs_cls_df)
+ cls_summary_overall = seqs_cls_summary.iloc[-1:].copy()
+ cls_summary_overall.rename(
+ index={'overall_calc': 'overall_calc_{}'.format(row)},
+ inplace=True)
+ self.cls_summary_list.append(cls_summary_overall)
+
+ def log(self):
+ seqs_summary = seqs_overall_metrics(
+ pd.concat(self.seqs_overall[self.num_classes]), verbose=True)
+ class_summary = seqs_overall_metrics(
+ pd.concat(self.cls_summary_list), verbose=True)
+
+ def get_results(self):
+ return 1
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/metrics.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/metrics.py
new file mode 100644
index 000000000..f9913b7fb
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/metrics.py
@@ -0,0 +1,432 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+import json
+import paddle
+import numpy as np
+
+from .map_utils import prune_zero_padding, DetectionMAP
+from .coco_utils import get_infer_results, cocoapi_eval
+from .widerface_utils import face_eval_run
+from ppdet.data.source.category import get_categories
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = [
+ 'Metric',
+ 'COCOMetric',
+ 'VOCMetric',
+ 'WiderFaceMetric',
+ 'get_infer_results',
+ 'RBoxMetric',
+ 'SNIPERCOCOMetric'
+]
+
+COCO_SIGMAS = np.array([
+ .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87,
+ .89, .89
+]) / 10.0
+CROWD_SIGMAS = np.array(
+ [.79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89, .79,
+ .79]) / 10.0
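+
+# Note (illustrative, not part of the original file): these are per-keypoint
+# OKS falloff constants for the 17-keypoint COCO style and the 14-keypoint
+# 'keypoints_crowd' style; accumulate() below passes the matching array to
+# cocoapi_eval via its `sigmas` argument.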
+
+
+class Metric(paddle.metric.Metric):
+ def name(self):
+ return self.__class__.__name__
+
+ def reset(self):
+ pass
+
+ def accumulate(self):
+ pass
+
+    # paddle.metric.Metric defines :meth:`update`, :meth:`accumulate` and
+    # :meth:`reset`; in ppdet, we also need the following 2 methods:
+
+ # abstract method for logging metric results
+ def log(self):
+ pass
+
+ # abstract method for getting metric results
+ def get_results(self):
+ pass
+
+
+class COCOMetric(Metric):
+ def __init__(self, anno_file, **kwargs):
+ assert os.path.isfile(anno_file), \
+ "anno_file {} not a file".format(anno_file)
+ self.anno_file = anno_file
+ self.clsid2catid = kwargs.get('clsid2catid', None)
+ if self.clsid2catid is None:
+ self.clsid2catid, _ = get_categories('COCO', anno_file)
+ self.classwise = kwargs.get('classwise', False)
+ self.output_eval = kwargs.get('output_eval', None)
+ # TODO: bias should be unified
+ self.bias = kwargs.get('bias', 0)
+ self.save_prediction_only = kwargs.get('save_prediction_only', False)
+ self.iou_type = kwargs.get('IouType', 'bbox')
+ self.reset()
+
+ def reset(self):
+        # only bbox and mask evaluation are supported currently
+ self.results = {'bbox': [], 'mask': [], 'segm': [], 'keypoint': []}
+ self.eval_results = {}
+
+ def update(self, inputs, outputs):
+ outs = {}
+ # outputs Tensor -> numpy.ndarray
+ for k, v in outputs.items():
+ outs[k] = v.numpy() if isinstance(v, paddle.Tensor) else v
+
+ im_id = inputs['im_id']
+ outs['im_id'] = im_id.numpy() if isinstance(im_id,
+ paddle.Tensor) else im_id
+
+ infer_results = get_infer_results(
+ outs, self.clsid2catid, bias=self.bias)
+ self.results['bbox'] += infer_results[
+ 'bbox'] if 'bbox' in infer_results else []
+ self.results['mask'] += infer_results[
+ 'mask'] if 'mask' in infer_results else []
+ self.results['segm'] += infer_results[
+ 'segm'] if 'segm' in infer_results else []
+ self.results['keypoint'] += infer_results[
+ 'keypoint'] if 'keypoint' in infer_results else []
+
+ def accumulate(self):
+ if len(self.results['bbox']) > 0:
+ output = "bbox.json"
+ if self.output_eval:
+ output = os.path.join(self.output_eval, output)
+ with open(output, 'w') as f:
+ json.dump(self.results['bbox'], f)
+ logger.info('The bbox result is saved to bbox.json.')
+
+ if self.save_prediction_only:
+                logger.info('The bbox result is saved to {}; mAP is not '
+                            'evaluated.'.format(output))
+ else:
+ bbox_stats = cocoapi_eval(
+ output,
+ 'bbox',
+ anno_file=self.anno_file,
+ classwise=self.classwise)
+ self.eval_results['bbox'] = bbox_stats
+ sys.stdout.flush()
+
+ if len(self.results['mask']) > 0:
+ output = "mask.json"
+ if self.output_eval:
+ output = os.path.join(self.output_eval, output)
+ with open(output, 'w') as f:
+ json.dump(self.results['mask'], f)
+ logger.info('The mask result is saved to mask.json.')
+
+ if self.save_prediction_only:
+                logger.info('The mask result is saved to {}; mAP is not '
+                            'evaluated.'.format(output))
+ else:
+ seg_stats = cocoapi_eval(
+ output,
+ 'segm',
+ anno_file=self.anno_file,
+ classwise=self.classwise)
+ self.eval_results['mask'] = seg_stats
+ sys.stdout.flush()
+
+ if len(self.results['segm']) > 0:
+ output = "segm.json"
+ if self.output_eval:
+ output = os.path.join(self.output_eval, output)
+ with open(output, 'w') as f:
+ json.dump(self.results['segm'], f)
+ logger.info('The segm result is saved to segm.json.')
+
+ if self.save_prediction_only:
+                logger.info('The segm result is saved to {}; mAP is not '
+                            'evaluated.'.format(output))
+ else:
+ seg_stats = cocoapi_eval(
+ output,
+ 'segm',
+ anno_file=self.anno_file,
+ classwise=self.classwise)
+                self.eval_results['segm'] = seg_stats
+ sys.stdout.flush()
+
+ if len(self.results['keypoint']) > 0:
+ output = "keypoint.json"
+ if self.output_eval:
+ output = os.path.join(self.output_eval, output)
+ with open(output, 'w') as f:
+ json.dump(self.results['keypoint'], f)
+ logger.info('The keypoint result is saved to keypoint.json.')
+
+ if self.save_prediction_only:
+                logger.info('The keypoint result is saved to {}; mAP is not '
+                            'evaluated.'.format(output))
+ else:
+ style = 'keypoints'
+ use_area = True
+ sigmas = COCO_SIGMAS
+ if self.iou_type == 'keypoints_crowd':
+ style = 'keypoints_crowd'
+ use_area = False
+ sigmas = CROWD_SIGMAS
+ keypoint_stats = cocoapi_eval(
+ output,
+ style,
+ anno_file=self.anno_file,
+ classwise=self.classwise,
+ sigmas=sigmas,
+ use_area=use_area)
+ self.eval_results['keypoint'] = keypoint_stats
+ sys.stdout.flush()
+
+ def log(self):
+ pass
+
+ def get_results(self):
+ return self.eval_results
+
+
+class VOCMetric(Metric):
+ def __init__(self,
+ label_list,
+ class_num=20,
+ overlap_thresh=0.5,
+ map_type='11point',
+ is_bbox_normalized=False,
+ evaluate_difficult=False,
+ classwise=False):
+ assert os.path.isfile(label_list), \
+ "label_list {} not a file".format(label_list)
+ self.clsid2catid, self.catid2name = get_categories('VOC', label_list)
+
+ self.overlap_thresh = overlap_thresh
+ self.map_type = map_type
+ self.evaluate_difficult = evaluate_difficult
+ self.detection_map = DetectionMAP(
+ class_num=class_num,
+ overlap_thresh=overlap_thresh,
+ map_type=map_type,
+ is_bbox_normalized=is_bbox_normalized,
+ evaluate_difficult=evaluate_difficult,
+ catid2name=self.catid2name,
+ classwise=classwise)
+
+ self.reset()
+
+ def reset(self):
+ self.detection_map.reset()
+
+ def update(self, inputs, outputs):
+ bbox_np = outputs['bbox'].numpy()
+ bboxes = bbox_np[:, 2:]
+ scores = bbox_np[:, 1]
+ labels = bbox_np[:, 0]
+ bbox_lengths = outputs['bbox_num'].numpy()
+
+        if bboxes is None or bboxes.shape == (1, 1):
+ return
+ gt_boxes = inputs['gt_bbox']
+ gt_labels = inputs['gt_class']
+ difficults = inputs['difficult'] if not self.evaluate_difficult \
+ else None
+
+ scale_factor = inputs['scale_factor'].numpy(
+ ) if 'scale_factor' in inputs else np.ones(
+ (gt_boxes.shape[0], 2)).astype('float32')
+
+ bbox_idx = 0
+ for i in range(len(gt_boxes)):
+ gt_box = gt_boxes[i].numpy()
+ h, w = scale_factor[i]
+ gt_box = gt_box / np.array([w, h, w, h])
+ gt_label = gt_labels[i].numpy()
+ difficult = None if difficults is None \
+ else difficults[i].numpy()
+ bbox_num = bbox_lengths[i]
+ bbox = bboxes[bbox_idx:bbox_idx + bbox_num]
+ score = scores[bbox_idx:bbox_idx + bbox_num]
+ label = labels[bbox_idx:bbox_idx + bbox_num]
+ gt_box, gt_label, difficult = prune_zero_padding(gt_box, gt_label,
+ difficult)
+ self.detection_map.update(bbox, score, label, gt_box, gt_label,
+ difficult)
+ bbox_idx += bbox_num
+
+ def accumulate(self):
+ logger.info("Accumulating evaluatation results...")
+ self.detection_map.accumulate()
+
+ def log(self):
+ map_stat = 100. * self.detection_map.get_map()
+ logger.info("mAP({:.2f}, {}) = {:.2f}%".format(self.overlap_thresh,
+ self.map_type, map_stat))
+
+ def get_results(self):
+ return {'bbox': [self.detection_map.get_map()]}
+
+
+class WiderFaceMetric(Metric):
+ def __init__(self, image_dir, anno_file, multi_scale=True):
+ self.image_dir = image_dir
+ self.anno_file = anno_file
+ self.multi_scale = multi_scale
+ self.clsid2catid, self.catid2name = get_categories('widerface')
+
+ def update(self, model):
+
+ face_eval_run(
+ model,
+ self.image_dir,
+ self.anno_file,
+ pred_dir='output/pred',
+ eval_mode='widerface',
+ multi_scale=self.multi_scale)
+
+
+class RBoxMetric(Metric):
+ def __init__(self, anno_file, **kwargs):
+ assert os.path.isfile(anno_file), \
+ "anno_file {} not a file".format(anno_file)
+ self.anno_file = anno_file
+ self.gt_anno = json.load(open(self.anno_file))
+ cats = self.gt_anno['categories']
+ self.clsid2catid = {i: cat['id'] for i, cat in enumerate(cats)}
+ self.catid2clsid = {cat['id']: i for i, cat in enumerate(cats)}
+ self.catid2name = {cat['id']: cat['name'] for cat in cats}
+ self.classwise = kwargs.get('classwise', False)
+ self.output_eval = kwargs.get('output_eval', None)
+ # TODO: bias should be unified
+ self.bias = kwargs.get('bias', 0)
+ self.save_prediction_only = kwargs.get('save_prediction_only', False)
+ self.iou_type = kwargs.get('IouType', 'bbox')
+ self.overlap_thresh = kwargs.get('overlap_thresh', 0.5)
+ self.map_type = kwargs.get('map_type', '11point')
+ self.evaluate_difficult = kwargs.get('evaluate_difficult', False)
+ class_num = len(self.catid2name)
+ self.detection_map = DetectionMAP(
+ class_num=class_num,
+ overlap_thresh=self.overlap_thresh,
+ map_type=self.map_type,
+ is_bbox_normalized=False,
+ evaluate_difficult=self.evaluate_difficult,
+ catid2name=self.catid2name,
+ classwise=self.classwise)
+
+ self.reset()
+
+ def reset(self):
+ self.result_bbox = []
+ self.detection_map.reset()
+
+ def update(self, inputs, outputs):
+ outs = {}
+ # outputs Tensor -> numpy.ndarray
+ for k, v in outputs.items():
+ outs[k] = v.numpy() if isinstance(v, paddle.Tensor) else v
+
+ im_id = inputs['im_id']
+ outs['im_id'] = im_id.numpy() if isinstance(im_id,
+ paddle.Tensor) else im_id
+
+ infer_results = get_infer_results(
+ outs, self.clsid2catid, bias=self.bias)
+ self.result_bbox += infer_results[
+ 'bbox'] if 'bbox' in infer_results else []
+ bbox = [b['bbox'] for b in self.result_bbox]
+ score = [b['score'] for b in self.result_bbox]
+ label = [b['category_id'] for b in self.result_bbox]
+ label = [self.catid2clsid[e] for e in label]
+ gt_box = [
+ e['bbox'] for e in self.gt_anno['annotations']
+ if e['image_id'] == outs['im_id']
+ ]
+ gt_label = [
+ e['category_id'] for e in self.gt_anno['annotations']
+ if e['image_id'] == outs['im_id']
+ ]
+ gt_label = [self.catid2clsid[e] for e in gt_label]
+ self.detection_map.update(bbox, score, label, gt_box, gt_label)
+
+ def accumulate(self):
+ if len(self.result_bbox) > 0:
+ output = "bbox.json"
+ if self.output_eval:
+ output = os.path.join(self.output_eval, output)
+ with open(output, 'w') as f:
+ json.dump(self.result_bbox, f)
+ logger.info('The bbox result is saved to bbox.json.')
+
+ if self.save_prediction_only:
+                logger.info('The bbox result is saved to {}; mAP is not '
+                            'evaluated.'.format(output))
+ else:
+ logger.info("Accumulating evaluatation results...")
+ self.detection_map.accumulate()
+
+ def log(self):
+ map_stat = 100. * self.detection_map.get_map()
+ logger.info("mAP({:.2f}, {}) = {:.2f}%".format(self.overlap_thresh,
+ self.map_type, map_stat))
+
+ def get_results(self):
+ return {'bbox': [self.detection_map.get_map()]}
+
+
+class SNIPERCOCOMetric(COCOMetric):
+ def __init__(self, anno_file, **kwargs):
+ super(SNIPERCOCOMetric, self).__init__(anno_file, **kwargs)
+ self.dataset = kwargs["dataset"]
+ self.chip_results = []
+
+ def reset(self):
+        # only bbox and mask evaluation are supported currently
+ self.results = {'bbox': [], 'mask': [], 'segm': [], 'keypoint': []}
+ self.eval_results = {}
+ self.chip_results = []
+
+ def update(self, inputs, outputs):
+ outs = {}
+ # outputs Tensor -> numpy.ndarray
+ for k, v in outputs.items():
+ outs[k] = v.numpy() if isinstance(v, paddle.Tensor) else v
+
+ im_id = inputs['im_id']
+ outs['im_id'] = im_id.numpy() if isinstance(im_id,
+ paddle.Tensor) else im_id
+
+ self.chip_results.append(outs)
+
+
+ def accumulate(self):
+        results = self.dataset.anno_cropper.aggregate_chips_detections(
+            self.chip_results)
+        for outs in results:
+            infer_results = get_infer_results(
+                outs, self.clsid2catid, bias=self.bias)
+            self.results['bbox'] += infer_results[
+                'bbox'] if 'bbox' in infer_results else []
+
+ super(SNIPERCOCOMetric, self).accumulate()
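+
+# Note (illustrative, not part of the original file): SNIPER evaluates on
+# image crops ("chips"); update() only buffers the raw chip outputs, and
+# accumulate() merges them back into full-image detections via the dataset's
+# anno_cropper before running the standard COCO accumulation.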
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/mot_metrics.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/mot_metrics.py
new file mode 100644
index 000000000..85cba3630
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/mot_metrics.py
@@ -0,0 +1,1232 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import copy
+import sys
+import math
+from collections import defaultdict
+import numpy as np
+import paddle
+import paddle.nn.functional as F
+from ppdet.modeling.bbox_utils import bbox_iou_np_expand
+from .map_utils import ap_per_class
+from .metrics import Metric
+from .munkres import Munkres
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = ['MOTEvaluator', 'MOTMetric', 'JDEDetMetric', 'KITTIMOTMetric']
+
+
+def read_mot_results(filename, is_gt=False, is_ignore=False):
+ valid_labels = {1}
+ ignore_labels = {2, 7, 8, 12} # only in motchallenge datasets like 'MOT16'
+ results_dict = dict()
+ if os.path.isfile(filename):
+ with open(filename, 'r') as f:
+ for line in f.readlines():
+ linelist = line.split(',')
+ if len(linelist) < 7:
+ continue
+ fid = int(linelist[0])
+ if fid < 1:
+ continue
+ results_dict.setdefault(fid, list())
+
+ box_size = float(linelist[4]) * float(linelist[5])
+
+ if is_gt:
+ label = int(float(linelist[7]))
+ mark = int(float(linelist[6]))
+ if mark == 0 or label not in valid_labels:
+ continue
+ score = 1
+ elif is_ignore:
+ if 'MOT16-' in filename or 'MOT17-' in filename or 'MOT15-' in filename or 'MOT20-' in filename:
+ label = int(float(linelist[7]))
+ vis_ratio = float(linelist[8])
+ if label not in ignore_labels and vis_ratio >= 0:
+ continue
+ else:
+ continue
+ score = 1
+ else:
+ score = float(linelist[6])
+
+ tlwh = tuple(map(float, linelist[2:6]))
+ target_id = int(linelist[1])
+
+ results_dict[fid].append((tlwh, target_id, score))
+ return results_dict
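+
+# Illustrative input row (MOTChallenge CSV, not part of the original file):
+# "1,5,10,20,30,60,0.9" parses as frame=1, track id 5, tlwh
+# (10., 20., 30., 60.), score 0.9; GT files instead carry a validity mark at
+# column 7 and a class label at column 8, used by the filtering above.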
+
+
+"""
+MOT dataset label list, see in https://motchallenge.net
+labels={'ped', ... % 1
+ 'person_on_vhcl', ... % 2
+ 'car', ... % 3
+ 'bicycle', ... % 4
+ 'mbike', ... % 5
+ 'non_mot_vhcl', ... % 6
+ 'static_person', ... % 7
+ 'distractor', ... % 8
+ 'occluder', ... % 9
+ 'occluder_on_grnd', ... % 10
+ 'occluder_full', ... % 11
+ 'reflection', ... % 12
+ 'crowd' ... % 13
+};
+"""
+
+
+def unzip_objs(objs):
+ if len(objs) > 0:
+ tlwhs, ids, scores = zip(*objs)
+ else:
+ tlwhs, ids, scores = [], [], []
+ tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
+ return tlwhs, ids, scores
+
+
+class MOTEvaluator(object):
+ def __init__(self, data_root, seq_name, data_type):
+ self.data_root = data_root
+ self.seq_name = seq_name
+ self.data_type = data_type
+
+ self.load_annotations()
+ self.reset_accumulator()
+
+ def load_annotations(self):
+ assert self.data_type == 'mot'
+ gt_filename = os.path.join(self.data_root, self.seq_name, 'gt',
+ 'gt.txt')
+ self.gt_frame_dict = read_mot_results(gt_filename, is_gt=True)
+ self.gt_ignore_frame_dict = read_mot_results(
+ gt_filename, is_ignore=True)
+
+ def reset_accumulator(self):
+ import motmetrics as mm
+ mm.lap.default_solver = 'lap'
+ self.acc = mm.MOTAccumulator(auto_id=True)
+
+ def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
+ import motmetrics as mm
+ mm.lap.default_solver = 'lap'
+ # results
+ trk_tlwhs = np.copy(trk_tlwhs)
+ trk_ids = np.copy(trk_ids)
+
+ # gts
+ gt_objs = self.gt_frame_dict.get(frame_id, [])
+ gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
+
+ # ignore boxes
+ ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
+ ignore_tlwhs = unzip_objs(ignore_objs)[0]
+
+ # remove ignored results
+ keep = np.ones(len(trk_tlwhs), dtype=bool)
+ iou_distance = mm.distances.iou_matrix(
+ ignore_tlwhs, trk_tlwhs, max_iou=0.5)
+ if len(iou_distance) > 0:
+ match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
+ match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
+ match_ious = iou_distance[match_is, match_js]
+
+ match_js = np.asarray(match_js, dtype=int)
+ match_js = match_js[np.logical_not(np.isnan(match_ious))]
+ keep[match_js] = False
+ trk_tlwhs = trk_tlwhs[keep]
+ trk_ids = trk_ids[keep]
+
+ # get distance matrix
+ iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
+
+ # acc
+ self.acc.update(gt_ids, trk_ids, iou_distance)
+
+ if rtn_events and iou_distance.size > 0 and hasattr(self.acc,
+ 'last_mot_events'):
+ events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics
+ else:
+ events = None
+ return events
+
+ def eval_file(self, filename):
+ self.reset_accumulator()
+
+ result_frame_dict = read_mot_results(filename, is_gt=False)
+ frames = sorted(list(set(result_frame_dict.keys())))
+ for frame_id in frames:
+ trk_objs = result_frame_dict.get(frame_id, [])
+ trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
+ self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
+
+ return self.acc
+
+ @staticmethod
+ def get_summary(accs,
+ names,
+ metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1',
+ 'precision', 'recall')):
+ import motmetrics as mm
+ mm.lap.default_solver = 'lap'
+ names = copy.deepcopy(names)
+ if metrics is None:
+ metrics = mm.metrics.motchallenge_metrics
+ metrics = copy.deepcopy(metrics)
+
+ mh = mm.metrics.create()
+ summary = mh.compute_many(
+ accs, metrics=metrics, names=names, generate_overall=True)
+ return summary
+
+ @staticmethod
+ def save_summary(summary, filename):
+ import pandas as pd
+ writer = pd.ExcelWriter(filename)
+ summary.to_excel(writer)
+ writer.save()
+
+
+class MOTMetric(Metric):
+ def __init__(self, save_summary=False):
+ self.save_summary = save_summary
+ self.MOTEvaluator = MOTEvaluator
+ self.result_root = None
+ self.reset()
+
+ def reset(self):
+ self.accs = []
+ self.seqs = []
+
+ def update(self, data_root, seq, data_type, result_root, result_filename):
+ evaluator = self.MOTEvaluator(data_root, seq, data_type)
+ self.accs.append(evaluator.eval_file(result_filename))
+ self.seqs.append(seq)
+ self.result_root = result_root
+
+ def accumulate(self):
+ import motmetrics as mm
+ import openpyxl
+ metrics = mm.metrics.motchallenge_metrics
+ mh = mm.metrics.create()
+ summary = self.MOTEvaluator.get_summary(self.accs, self.seqs, metrics)
+ self.strsummary = mm.io.render_summary(
+ summary,
+ formatters=mh.formatters,
+ namemap=mm.io.motchallenge_metric_names)
+ if self.save_summary:
+ self.MOTEvaluator.save_summary(
+ summary, os.path.join(self.result_root, 'summary.xlsx'))
+
+ def log(self):
+ print(self.strsummary)
+
+ def get_results(self):
+ return self.strsummary
+
+
+class JDEDetMetric(Metric):
+ # Note this detection AP metric is different from COCOMetric or VOCMetric,
+ # and the bboxes coordinates are not scaled to the original image
+ def __init__(self, overlap_thresh=0.5):
+ self.overlap_thresh = overlap_thresh
+ self.reset()
+
+ def reset(self):
+ self.AP_accum = np.zeros(1)
+ self.AP_accum_count = np.zeros(1)
+
+ def update(self, inputs, outputs):
+ bboxes = outputs['bbox'][:, 2:].numpy()
+ scores = outputs['bbox'][:, 1].numpy()
+ labels = outputs['bbox'][:, 0].numpy()
+ bbox_lengths = outputs['bbox_num'].numpy()
+ if bboxes.shape[0] == 1 and bboxes.sum() == 0.0:
+ return
+
+ gt_boxes = inputs['gt_bbox'].numpy()[0]
+ gt_labels = inputs['gt_class'].numpy()[0]
+ if gt_labels.shape[0] == 0:
+ return
+
+ correct = []
+ detected = []
+ for i in range(bboxes.shape[0]):
+ obj_pred = 0
+ pred_bbox = bboxes[i].reshape(1, 4)
+ # Compute iou with target boxes
+ iou = bbox_iou_np_expand(pred_bbox, gt_boxes, x1y1x2y2=True)[0]
+ # Extract index of largest overlap
+ best_i = np.argmax(iou)
+ # If overlap exceeds threshold and classification is correct mark as correct
+ if iou[best_i] > self.overlap_thresh and obj_pred == gt_labels[
+ best_i] and best_i not in detected:
+ correct.append(1)
+ detected.append(best_i)
+ else:
+ correct.append(0)
+
+ # Compute Average Precision (AP) per class
+ target_cls = list(gt_labels.T[0])
+ AP, AP_class, R, P = ap_per_class(
+ tp=correct,
+ conf=scores,
+ pred_cls=np.zeros_like(scores),
+ target_cls=target_cls)
+ self.AP_accum_count += np.bincount(AP_class, minlength=1)
+ self.AP_accum += np.bincount(AP_class, minlength=1, weights=AP)
+
+ def accumulate(self):
+ logger.info("Accumulating evaluatation results...")
+ self.map_stat = self.AP_accum[0] / (self.AP_accum_count[0] + 1E-16)
+
+ def log(self):
+ map_stat = 100. * self.map_stat
+ logger.info("mAP({:.2f}) = {:.2f}%".format(self.overlap_thresh,
+ map_stat))
+
+ def get_results(self):
+ return self.map_stat
+
+
+"""
+Following code is borrowed from https://github.com/xingyizhou/CenterTrack/blob/master/src/tools/eval_kitti_track/evaluate_tracking.py
+"""
+
+
+class tData:
+ """
+ Utility class to load data.
+ """
+ def __init__(self,frame=-1,obj_type="unset",truncation=-1,occlusion=-1,\
+ obs_angle=-10,x1=-1,y1=-1,x2=-1,y2=-1,w=-1,h=-1,l=-1,\
+ X=-1000,Y=-1000,Z=-1000,yaw=-10,score=-1000,track_id=-1):
+ """
+ Constructor, initializes the object given the parameters.
+ """
+ self.frame = frame
+ self.track_id = track_id
+ self.obj_type = obj_type
+ self.truncation = truncation
+ self.occlusion = occlusion
+ self.obs_angle = obs_angle
+ self.x1 = x1
+ self.y1 = y1
+ self.x2 = x2
+ self.y2 = y2
+ self.w = w
+ self.h = h
+ self.l = l
+ self.X = X
+ self.Y = Y
+ self.Z = Z
+ self.yaw = yaw
+ self.score = score
+ self.ignored = False
+ self.valid = False
+ self.tracker = -1
+
+ def __str__(self):
+ attrs = vars(self)
+ return '\n'.join("%s: %s" % item for item in attrs.items())
+
+
+class KITTIEvaluation(object):
+ """ KITTI tracking statistics (CLEAR MOT, id-switches, fragments, ML/PT/MT, precision/recall)
+ MOTA - Multi-object tracking accuracy in [0,100]
+ MOTP - Multi-object tracking precision in [0,100] (3D) / [td,100] (2D)
+ MOTAL - Multi-object tracking accuracy in [0,100] with log10(id-switches)
+
+ id-switches - number of id switches
+ fragments - number of fragmentations
+
+ MT, PT, ML - number of mostly tracked, partially tracked and mostly lost trajectories
+
+ recall - recall = percentage of detected targets
+ precision - precision = percentage of correctly detected targets
+ FAR - number of false alarms per frame
+ falsepositives - number of false positives (FP)
+ missed - number of missed targets (FN)
+ """
+ def __init__(self, result_path, gt_path, min_overlap=0.5, max_truncation = 0,\
+ min_height = 25, max_occlusion = 2, cls="car",\
+ n_frames=[], seqs=[], n_sequences=0):
+ # get number of sequences and
+ # get number of frames per sequence from test mapping
+ # (created while extracting the benchmark)
+ self.gt_path = os.path.join(gt_path, "../labels")
+ self.n_frames = n_frames
+ self.sequence_name = seqs
+ self.n_sequences = n_sequences
+
+ self.cls = cls # class to evaluate, i.e. pedestrian or car
+
+ self.result_path = result_path
+
+ # statistics and numbers for evaluation
+ self.n_gt = 0 # number of ground truth detections minus ignored false negatives and true positives
+ self.n_igt = 0 # number of ignored ground truth detections
+ self.n_gts = [
+ ] # number of ground truth detections minus ignored false negatives and true positives PER SEQUENCE
+ self.n_igts = [
+ ] # number of ground ignored truth detections PER SEQUENCE
+ self.n_gt_trajectories = 0
+ self.n_gt_seq = []
+ self.n_tr = 0 # number of tracker detections minus ignored tracker detections
+ self.n_trs = [
+ ] # number of tracker detections minus ignored tracker detections PER SEQUENCE
+ self.n_itr = 0 # number of ignored tracker detections
+ self.n_itrs = [] # number of ignored tracker detections PER SEQUENCE
+ self.n_igttr = 0 # number of ignored ground truth detections where the corresponding associated tracker detection is also ignored
+ self.n_tr_trajectories = 0
+ self.n_tr_seq = []
+ self.MOTA = 0
+ self.MOTP = 0
+ self.MOTAL = 0
+ self.MODA = 0
+ self.MODP = 0
+ self.MODP_t = []
+ self.recall = 0
+ self.precision = 0
+ self.F1 = 0
+ self.FAR = 0
+ self.total_cost = 0
+ self.itp = 0 # number of ignored true positives
+ self.itps = [] # number of ignored true positives PER SEQUENCE
+ self.tp = 0 # number of true positives including ignored true positives!
+ self.tps = [
+ ] # number of true positives including ignored true positives PER SEQUENCE
+ self.fn = 0 # number of false negatives WITHOUT ignored false negatives
+ self.fns = [
+ ] # number of false negatives WITHOUT ignored false negatives PER SEQUENCE
+ self.ifn = 0 # number of ignored false negatives
+ self.ifns = [] # number of ignored false negatives PER SEQUENCE
+ self.fp = 0 # number of false positives
+ # a bit tricky, the number of ignored false negatives and ignored true positives
+ # is subtracted, but if both tracker detection and ground truth detection
+ # are ignored this number is added again to avoid double counting
+ self.fps = [] # above PER SEQUENCE
+ self.mme = 0
+ self.fragments = 0
+ self.id_switches = 0
+ self.MT = 0
+ self.PT = 0
+ self.ML = 0
+
+ self.min_overlap = min_overlap # minimum bounding box overlap for 3rd party metrics
+ self.max_truncation = max_truncation # maximum truncation of an object for evaluation
+ self.max_occlusion = max_occlusion # maximum occlusion of an object for evaluation
+ self.min_height = min_height # minimum height of an object for evaluation
+ self.n_sample_points = 500
+
+ # this should be enough to hold all groundtruth trajectories
+ # is expanded if necessary and reduced in any case
+ self.gt_trajectories = [[] for x in range(self.n_sequences)]
+ self.ign_trajectories = [[] for x in range(self.n_sequences)]
+
+ def loadGroundtruth(self):
+ try:
+ self._loadData(self.gt_path, cls=self.cls, loading_groundtruth=True)
+ except IOError:
+ return False
+ return True
+
+ def loadTracker(self):
+ try:
+ if not self._loadData(
+ self.result_path, cls=self.cls, loading_groundtruth=False):
+ return False
+ except IOError:
+ return False
+ return True
+
+ def _loadData(self,
+ root_dir,
+ cls,
+ min_score=-1000,
+ loading_groundtruth=False):
+ """
+ Generic loader for ground truth and tracking data.
+ Use loadGroundtruth() or loadTracker() to load this data.
+ Loads detections in KITTI format from textfiles.
+ """
+ # construct objectDetections object to hold detection data
+ t_data = tData()
+ data = []
+ eval_2d = True
+ eval_3d = True
+
+ seq_data = []
+ n_trajectories = 0
+ n_trajectories_seq = []
+ for seq, s_name in enumerate(self.sequence_name):
+ i = 0
+ filename = os.path.join(root_dir, "%s.txt" % s_name)
+ f = open(filename, "r")
+
+ f_data = [
+ [] for x in range(self.n_frames[seq])
+ ] # current set has only 1059 entries, sufficient length is checked anyway
+ ids = []
+ n_in_seq = 0
+ id_frame_cache = []
+ for line in f:
+ # KITTI tracking benchmark data format:
+ # (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,X,Y,Z,ry)
+ line = line.strip()
+ fields = line.split(" ")
+ # classes that should be loaded (ignored neighboring classes)
+ if "car" in cls.lower():
+ classes = ["car", "van"]
+ elif "pedestrian" in cls.lower():
+ classes = ["pedestrian", "person_sitting"]
+ else:
+ classes = [cls.lower()]
+ classes += ["dontcare"]
+ if not any([s for s in classes if s in fields[2].lower()]):
+ continue
+ # get fields from table
+ t_data.frame = int(float(fields[0])) # frame
+ t_data.track_id = int(float(fields[1])) # id
+ t_data.obj_type = fields[
+ 2].lower() # object type [car, pedestrian, cyclist, ...]
+ t_data.truncation = int(
+ float(fields[3])) # truncation [-1,0,1,2]
+ t_data.occlusion = int(
+ float(fields[4])) # occlusion [-1,0,1,2]
+ t_data.obs_angle = float(fields[5]) # observation angle [rad]
+ t_data.x1 = float(fields[6]) # left [px]
+ t_data.y1 = float(fields[7]) # top [px]
+ t_data.x2 = float(fields[8]) # right [px]
+ t_data.y2 = float(fields[9]) # bottom [px]
+ t_data.h = float(fields[10]) # height [m]
+ t_data.w = float(fields[11]) # width [m]
+ t_data.l = float(fields[12]) # length [m]
+ t_data.X = float(fields[13]) # X [m]
+ t_data.Y = float(fields[14]) # Y [m]
+ t_data.Z = float(fields[15]) # Z [m]
+ t_data.yaw = float(fields[16]) # yaw angle [rad]
+ if not loading_groundtruth:
+ if len(fields) == 17:
+ t_data.score = -1
+ elif len(fields) == 18:
+ t_data.score = float(fields[17]) # detection score
+ else:
+ logger.info("file is not in KITTI format")
+ return
+
+ # do not consider objects marked as invalid
+                if t_data.track_id == -1 and t_data.obj_type != "dontcare":
+ continue
+
+ idx = t_data.frame
+ # check if length for frame data is sufficient
+ if idx >= len(f_data):
+ print("extend f_data", idx, len(f_data))
+ f_data += [[] for x in range(max(500, idx - len(f_data)))]
+ try:
+ id_frame = (t_data.frame, t_data.track_id)
+ if id_frame in id_frame_cache and not loading_groundtruth:
+ logger.info(
+ "track ids are not unique for sequence %d: frame %d"
+ % (seq, t_data.frame))
+ logger.info(
+ "track id %d occured at least twice for this frame"
+ % t_data.track_id)
+ logger.info("Exiting...")
+ #continue # this allows to evaluate non-unique result files
+ return False
+ id_frame_cache.append(id_frame)
+ f_data[t_data.frame].append(copy.copy(t_data))
+ except:
+ print(len(f_data), idx)
+ raise
+
+ if t_data.track_id not in ids and t_data.obj_type != "dontcare":
+ ids.append(t_data.track_id)
+ n_trajectories += 1
+ n_in_seq += 1
+
+ # check if uploaded data provides information for 2D and 3D evaluation
+ if not loading_groundtruth and eval_2d is True and (
+ t_data.x1 == -1 or t_data.x2 == -1 or t_data.y1 == -1 or
+ t_data.y2 == -1):
+ eval_2d = False
+ if not loading_groundtruth and eval_3d is True and (
+ t_data.X == -1000 or t_data.Y == -1000 or
+ t_data.Z == -1000):
+ eval_3d = False
+
+ # only add existing frames
+ n_trajectories_seq.append(n_in_seq)
+ seq_data.append(f_data)
+ f.close()
+
+ if not loading_groundtruth:
+ self.tracker = seq_data
+ self.n_tr_trajectories = n_trajectories
+ self.eval_2d = eval_2d
+ self.eval_3d = eval_3d
+ self.n_tr_seq = n_trajectories_seq
+ if self.n_tr_trajectories == 0:
+ return False
+ else:
+ # split ground truth and DontCare areas
+ self.dcareas = []
+ self.groundtruth = []
+ for seq_idx in range(len(seq_data)):
+ seq_gt = seq_data[seq_idx]
+ s_g, s_dc = [], []
+ for f in range(len(seq_gt)):
+ all_gt = seq_gt[f]
+ g, dc = [], []
+ for gg in all_gt:
+ if gg.obj_type == "dontcare":
+ dc.append(gg)
+ else:
+ g.append(gg)
+ s_g.append(g)
+ s_dc.append(dc)
+ self.dcareas.append(s_dc)
+ self.groundtruth.append(s_g)
+ self.n_gt_seq = n_trajectories_seq
+ self.n_gt_trajectories = n_trajectories
+ return True
+
+ def boxoverlap(self, a, b, criterion="union"):
+ """
+        boxoverlap computes intersection over union for bboxes a and b in KITTI format.
+        If the criterion is 'union', overlap = (a inter b) / (a union b).
+        If the criterion is 'a', overlap = (a inter b) / a, where b should be a DontCare area.
+ """
+ x1 = max(a.x1, b.x1)
+ y1 = max(a.y1, b.y1)
+ x2 = min(a.x2, b.x2)
+ y2 = min(a.y2, b.y2)
+
+ w = x2 - x1
+ h = y2 - y1
+
+ if w <= 0. or h <= 0.:
+ return 0.
+ inter = w * h
+ aarea = (a.x2 - a.x1) * (a.y2 - a.y1)
+ barea = (b.x2 - b.x1) * (b.y2 - b.y1)
+ # intersection over union overlap
+ if criterion.lower() == "union":
+ o = inter / float(aarea + barea - inter)
+ elif criterion.lower() == "a":
+ o = float(inter) / float(aarea)
+ else:
+ raise TypeError("Unkown type for criterion")
+ return o
+
+ def compute3rdPartyMetrics(self):
+ """
+ Computes the metrics defined in
+        - Bernardin & Stiefelhagen 2008: Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics
+ MOTA, MOTAL, MOTP
+        - Zhang, Li & Nevatia 2008: Global Data Association for Multi-Object Tracking Using Network Flows
+ MT/PT/ML
+ """
+ # construct Munkres object for Hungarian Method association
+ hm = Munkres()
+ max_cost = 1e9
+
+ # go through all frames and associate ground truth and tracker results
+ # groundtruth and tracker contain lists for every single frame containing lists of KITTI format detections
+ fr, ids = 0, 0
+ for seq_idx in range(len(self.groundtruth)):
+ seq_gt = self.groundtruth[seq_idx]
+ seq_dc = self.dcareas[seq_idx] # don't care areas
+ seq_tracker = self.tracker[seq_idx]
+ seq_trajectories = defaultdict(list)
+ seq_ignored = defaultdict(list)
+
+ # statistics over the current sequence, check the corresponding
+ # variable comments in __init__ to get their meaning
+ seqtp = 0
+ seqitp = 0
+ seqfn = 0
+ seqifn = 0
+ seqfp = 0
+ seqigt = 0
+ seqitr = 0
+
+ last_ids = [[], []]
+ n_gts = 0
+ n_trs = 0
+
+ for f in range(len(seq_gt)):
+ g = seq_gt[f]
+ dc = seq_dc[f]
+
+ t = seq_tracker[f]
+ # counting total number of ground truth and tracker objects
+ self.n_gt += len(g)
+ self.n_tr += len(t)
+
+ n_gts += len(g)
+ n_trs += len(t)
+
+ # use hungarian method to associate, using boxoverlap 0..1 as cost
+ # build cost matrix
+ cost_matrix = []
+ this_ids = [[], []]
+ for gg in g:
+ # save current ids
+ this_ids[0].append(gg.track_id)
+ this_ids[1].append(-1)
+ gg.tracker = -1
+ gg.id_switch = 0
+ gg.fragmentation = 0
+ cost_row = []
+ for tt in t:
+                        # overlap == 1 means cost == 0
+                        c = 1 - self.boxoverlap(gg, tt)
+                        # gating for boxoverlap (e.g. with min_overlap = 0.5,
+                        # c <= 0.5 is the same as requiring IoU >= 0.5)
+                        if c <= self.min_overlap:
+ cost_row.append(c)
+ else:
+ cost_row.append(max_cost) # = 1e9
+ cost_matrix.append(cost_row)
+ # all ground truth trajectories are initially not associated
+ # extend groundtruth trajectories lists (merge lists)
+ seq_trajectories[gg.track_id].append(-1)
+ seq_ignored[gg.track_id].append(False)
+
+                if len(g) == 0:
+ cost_matrix = [[]]
+ # associate
+ association_matrix = hm.compute(cost_matrix)
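+                # e.g. for a hypothetical 2x2 case with cost_matrix =
+                # [[0.2, 1e9], [1e9, 0.4]], the solver returns
+                # [(0, 0), (1, 1)]; gated pairs (cost >= max_cost) are
+                # rejected again in the loop below.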
+
+ # tmp variables for sanity checks and MODP computation
+ tmptp = 0
+ tmpfp = 0
+ tmpfn = 0
+ tmpc = 0 # this will sum up the overlaps for all true positives
+ tmpcs = [0] * len(
+ g) # this will save the overlaps for all true positives
+ # the reason is that some true positives might be ignored
+                # later such that the corresponding overlaps can
+ # be subtracted from tmpc for MODP computation
+
+ # mapping for tracker ids and ground truth ids
+ for row, col in association_matrix:
+ # apply gating on boxoverlap
+ c = cost_matrix[row][col]
+ if c < max_cost:
+ g[row].tracker = t[col].track_id
+ this_ids[1][row] = t[col].track_id
+ t[col].valid = True
+ g[row].distance = c
+ self.total_cost += 1 - c
+ tmpc += 1 - c
+ tmpcs[row] = 1 - c
+ seq_trajectories[g[row].track_id][-1] = t[col].track_id
+
+ # true positives are only valid associations
+ self.tp += 1
+ tmptp += 1
+ else:
+ g[row].tracker = -1
+ self.fn += 1
+ tmpfn += 1
+
+ # associate tracker and DontCare areas
+ # ignore tracker in neighboring classes
+ nignoredtracker = 0 # number of ignored tracker detections
+ ignoredtrackers = dict() # will associate the track_id with -1
+ # if it is not ignored and 1 if it is
+ # ignored;
+ # this is used to avoid double counting ignored
+ # cases, see the next loop
+
+ for tt in t:
+ ignoredtrackers[tt.track_id] = -1
+ # ignore detection if it belongs to a neighboring class or is
+ # smaller or equal to the minimum height
+
+ tt_height = abs(tt.y1 - tt.y2)
+ if ((self.cls == "car" and tt.obj_type == "van") or
+ (self.cls == "pedestrian" and
+ tt.obj_type == "person_sitting") or
+ tt_height <= self.min_height) and not tt.valid:
+ nignoredtracker += 1
+ tt.ignored = True
+ ignoredtrackers[tt.track_id] = 1
+ continue
+ for d in dc:
+ overlap = self.boxoverlap(tt, d, "a")
+ if overlap > 0.5 and not tt.valid:
+ tt.ignored = True
+ nignoredtracker += 1
+ ignoredtrackers[tt.track_id] = 1
+ break
+
+ # check for ignored FN/TP (truncation or neighboring object class)
+ ignoredfn = 0 # the number of ignored false negatives
+ nignoredtp = 0 # the number of ignored true positives
+ nignoredpairs = 0 # the number of ignored pairs, i.e. a true positive
+ # which is ignored but where the associated tracker
+ # detection has already been ignored
+
+ gi = 0
+ for gg in g:
+ if gg.tracker < 0:
+ if gg.occlusion>self.max_occlusion or gg.truncation>self.max_truncation\
+ or (self.cls=="car" and gg.obj_type=="van") or (self.cls=="pedestrian" and gg.obj_type=="person_sitting"):
+ seq_ignored[gg.track_id][-1] = True
+ gg.ignored = True
+ ignoredfn += 1
+
+ elif gg.tracker >= 0:
+ if gg.occlusion>self.max_occlusion or gg.truncation>self.max_truncation\
+ or (self.cls=="car" and gg.obj_type=="van") or (self.cls=="pedestrian" and gg.obj_type=="person_sitting"):
+
+ seq_ignored[gg.track_id][-1] = True
+ gg.ignored = True
+ nignoredtp += 1
+
+ # if the associated tracker detection is already ignored,
+ # we want to avoid double counting ignored detections
+ if ignoredtrackers[gg.tracker] > 0:
+ nignoredpairs += 1
+
+ # for computing MODP, the overlaps from ignored detections
+ # are subtracted
+ tmpc -= tmpcs[gi]
+ gi += 1
+
+                # the below might be confusing, check the comments in __init__
+ # to see what the individual statistics represent
+
+ # correct TP by number of ignored TP due to truncation
+ # ignored TP are shown as tracked in visualization
+ tmptp -= nignoredtp
+
+ # count the number of ignored true positives
+ self.itp += nignoredtp
+
+ # adjust the number of ground truth objects considered
+ self.n_gt -= (ignoredfn + nignoredtp)
+
+ # count the number of ignored ground truth objects
+ self.n_igt += ignoredfn + nignoredtp
+
+ # count the number of ignored tracker objects
+ self.n_itr += nignoredtracker
+
+ # count the number of ignored pairs, i.e. associated tracker and
+ # ground truth objects that are both ignored
+ self.n_igttr += nignoredpairs
+
+                # false negatives = associated gt bboxes exceeding association threshold + non-associated gt bboxes
+ tmpfn += len(g) - len(association_matrix) - ignoredfn
+ self.fn += len(g) - len(association_matrix) - ignoredfn
+ self.ifn += ignoredfn
+
+ # false positives = tracker bboxes - associated tracker bboxes
+ # mismatches (mme_t)
+ tmpfp += len(
+ t) - tmptp - nignoredtracker - nignoredtp + nignoredpairs
+ self.fp += len(
+ t) - tmptp - nignoredtracker - nignoredtp + nignoredpairs
+
+ # update sequence data
+ seqtp += tmptp
+ seqitp += nignoredtp
+ seqfp += tmpfp
+ seqfn += tmpfn
+ seqifn += ignoredfn
+ seqigt += ignoredfn + nignoredtp
+ seqitr += nignoredtracker
+
+ # sanity checks
+                # - the number of true positives minus ignored true positives
+ # should be greater or equal to 0
+ # - the number of false negatives should be greater or equal to 0
+ # - the number of false positives needs to be greater or equal to 0
+                # otherwise ignored detections might be counted twice
+ # - the number of counted true positives (plus ignored ones)
+ # and the number of counted false negatives (plus ignored ones)
+ # should match the total number of ground truth objects
+ # - the number of counted true positives (plus ignored ones)
+ # and the number of counted false positives
+ # plus the number of ignored tracker detections should
+ # match the total number of tracker detections; note that
+ # nignoredpairs is subtracted here to avoid double counting
+                # of ignored detections in nignoredtp and nignoredtracker
+ if tmptp < 0:
+ print(tmptp, nignoredtp)
+ raise NameError("Something went wrong! TP is negative")
+ if tmpfn < 0:
+ print(tmpfn,
+ len(g),
+ len(association_matrix), ignoredfn, nignoredpairs)
+ raise NameError("Something went wrong! FN is negative")
+ if tmpfp < 0:
+ print(tmpfp,
+ len(t), tmptp, nignoredtracker, nignoredtp,
+ nignoredpairs)
+ raise NameError("Something went wrong! FP is negative")
+                if tmptp + tmpfn != len(g) - ignoredfn - nignoredtp:
+ print("seqidx", seq_idx)
+ print("frame ", f)
+ print("TP ", tmptp)
+ print("FN ", tmpfn)
+ print("FP ", tmpfp)
+ print("nGT ", len(g))
+ print("nAss ", len(association_matrix))
+ print("ign GT", ignoredfn)
+ print("ign TP", nignoredtp)
+ raise NameError(
+ "Something went wrong! nGroundtruth is not TP+FN")
+                if tmptp + tmpfp + nignoredtp + nignoredtracker - nignoredpairs != len(
+                        t):
+ print(seq_idx, f, len(t), tmptp, tmpfp)
+ print(len(association_matrix), association_matrix)
+ raise NameError(
+ "Something went wrong! nTracker is not TP+FP")
+
+ # check for id switches or fragmentations
+ for i, tt in enumerate(this_ids[0]):
+ if tt in last_ids[0]:
+ idx = last_ids[0].index(tt)
+ tid = this_ids[1][i]
+ lid = last_ids[1][idx]
+ if tid != lid and lid != -1 and tid != -1:
+ if g[i].truncation < self.max_truncation:
+ g[i].id_switch = 1
+ ids += 1
+ if tid != lid and lid != -1:
+ if g[i].truncation < self.max_truncation:
+ g[i].fragmentation = 1
+ fr += 1
+
+ # save current index
+ last_ids = this_ids
+                # compute MODP_t
+ MODP_t = 1
+ if tmptp != 0:
+ MODP_t = tmpc / float(tmptp)
+ self.MODP_t.append(MODP_t)
+
+ # remove empty lists for current gt trajectories
+ self.gt_trajectories[seq_idx] = seq_trajectories
+ self.ign_trajectories[seq_idx] = seq_ignored
+
+ # gather statistics for "per sequence" statistics.
+ self.n_gts.append(n_gts)
+ self.n_trs.append(n_trs)
+ self.tps.append(seqtp)
+ self.itps.append(seqitp)
+ self.fps.append(seqfp)
+ self.fns.append(seqfn)
+ self.ifns.append(seqifn)
+ self.n_igts.append(seqigt)
+ self.n_itrs.append(seqitr)
+
+ # compute MT/PT/ML, fragments, idswitches for all groundtruth trajectories
+ n_ignored_tr_total = 0
+ for seq_idx, (
+ seq_trajectories, seq_ignored
+ ) in enumerate(zip(self.gt_trajectories, self.ign_trajectories)):
+ if len(seq_trajectories) == 0:
+ continue
+ tmpMT, tmpML, tmpPT, tmpId_switches, tmpFragments = [0] * 5
+ n_ignored_tr = 0
+ for g, ign_g in zip(seq_trajectories.values(),
+ seq_ignored.values()):
+ # all frames of this gt trajectory are ignored
+ if all(ign_g):
+ n_ignored_tr += 1
+ n_ignored_tr_total += 1
+ continue
+ # all frames of this gt trajectory are not assigned to any detections
+ if all([this == -1 for this in g]):
+ tmpML += 1
+ self.ML += 1
+ continue
+ # compute tracked frames in trajectory
+ last_id = g[0]
+                # the first frame counts as tracked if it has an associated detection
+ tracked = 1 if g[0] >= 0 else 0
+ lgt = 0 if ign_g[0] else 1
+ for f in range(1, len(g)):
+ if ign_g[f]:
+ last_id = -1
+ continue
+ lgt += 1
+ if last_id != g[f] and last_id != -1 and g[f] != -1 and g[
+ f - 1] != -1:
+ tmpId_switches += 1
+ self.id_switches += 1
+ if f < len(g) - 1 and g[f - 1] != g[
+ f] and last_id != -1 and g[f] != -1 and g[f +
+ 1] != -1:
+ tmpFragments += 1
+ self.fragments += 1
+ if g[f] != -1:
+ tracked += 1
+ last_id = g[f]
+ # handle last frame; tracked state is handled in for loop (g[f]!=-1)
+ if len(g) > 1 and g[f - 1] != g[f] and last_id != -1 and g[
+ f] != -1 and not ign_g[f]:
+ tmpFragments += 1
+ self.fragments += 1
+
+ # compute MT/PT/ML
+ tracking_ratio = tracked / float(len(g) - sum(ign_g))
+ if tracking_ratio > 0.8:
+ tmpMT += 1
+ self.MT += 1
+ elif tracking_ratio < 0.2:
+ tmpML += 1
+ self.ML += 1
+ else: # 0.2 <= tracking_ratio <= 0.8
+ tmpPT += 1
+ self.PT += 1
+
+ if (self.n_gt_trajectories - n_ignored_tr_total) == 0:
+ self.MT = 0.
+ self.PT = 0.
+ self.ML = 0.
+ else:
+ self.MT /= float(self.n_gt_trajectories - n_ignored_tr_total)
+ self.PT /= float(self.n_gt_trajectories - n_ignored_tr_total)
+ self.ML /= float(self.n_gt_trajectories - n_ignored_tr_total)
+
+ # precision/recall etc.
+ if (self.fp + self.tp) == 0 or (self.tp + self.fn) == 0:
+ self.recall = 0.
+ self.precision = 0.
+ else:
+ self.recall = self.tp / float(self.tp + self.fn)
+ self.precision = self.tp / float(self.fp + self.tp)
+ if (self.recall + self.precision) == 0:
+ self.F1 = 0.
+ else:
+ self.F1 = 2. * (self.precision * self.recall) / (
+ self.precision + self.recall)
+ if sum(self.n_frames) == 0:
+ self.FAR = "n/a"
+ else:
+ self.FAR = self.fp / float(sum(self.n_frames))
+
+ # compute CLEARMOT
+ if self.n_gt == 0:
+ self.MOTA = -float("inf")
+ self.MODA = -float("inf")
+ else:
+ self.MOTA = 1 - (self.fn + self.fp + self.id_switches
+ ) / float(self.n_gt)
+ self.MODA = 1 - (self.fn + self.fp) / float(self.n_gt)
+ if self.tp == 0:
+ self.MOTP = float("inf")
+ else:
+ self.MOTP = self.total_cost / float(self.tp)
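+        # Worked example with hypothetical counts: n_gt = 100, fn = 10,
+        # fp = 5 and id_switches = 2 give MOTA = 1 - (10+5+2)/100 = 0.83;
+        # MOTP = total_cost / tp is the mean overlap over true positives.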
+ if self.n_gt != 0:
+ if self.id_switches == 0:
+ self.MOTAL = 1 - (self.fn + self.fp + self.id_switches
+ ) / float(self.n_gt)
+ else:
+ self.MOTAL = 1 - (self.fn + self.fp +
+ math.log10(self.id_switches)
+ ) / float(self.n_gt)
+ else:
+ self.MOTAL = -float("inf")
+ if sum(self.n_frames) == 0:
+ self.MODP = "n/a"
+ else:
+ self.MODP = sum(self.MODP_t) / float(sum(self.n_frames))
+ return True
+
+ def createSummary(self):
+ summary = ""
+ summary += "tracking evaluation summary".center(80, "=") + "\n"
+ summary += self.printEntry("Multiple Object Tracking Accuracy (MOTA)",
+ self.MOTA) + "\n"
+ summary += self.printEntry("Multiple Object Tracking Precision (MOTP)",
+ self.MOTP) + "\n"
+ summary += self.printEntry("Multiple Object Tracking Accuracy (MOTAL)",
+ self.MOTAL) + "\n"
+ summary += self.printEntry("Multiple Object Detection Accuracy (MODA)",
+ self.MODA) + "\n"
+ summary += self.printEntry("Multiple Object Detection Precision (MODP)",
+ self.MODP) + "\n"
+ summary += "\n"
+ summary += self.printEntry("Recall", self.recall) + "\n"
+ summary += self.printEntry("Precision", self.precision) + "\n"
+ summary += self.printEntry("F1", self.F1) + "\n"
+ summary += self.printEntry("False Alarm Rate", self.FAR) + "\n"
+ summary += "\n"
+ summary += self.printEntry("Mostly Tracked", self.MT) + "\n"
+ summary += self.printEntry("Partly Tracked", self.PT) + "\n"
+ summary += self.printEntry("Mostly Lost", self.ML) + "\n"
+ summary += "\n"
+ summary += self.printEntry("True Positives", self.tp) + "\n"
+ #summary += self.printEntry("True Positives per Sequence", self.tps) + "\n"
+ summary += self.printEntry("Ignored True Positives", self.itp) + "\n"
+ #summary += self.printEntry("Ignored True Positives per Sequence", self.itps) + "\n"
+
+ summary += self.printEntry("False Positives", self.fp) + "\n"
+ #summary += self.printEntry("False Positives per Sequence", self.fps) + "\n"
+ summary += self.printEntry("False Negatives", self.fn) + "\n"
+ #summary += self.printEntry("False Negatives per Sequence", self.fns) + "\n"
+ summary += self.printEntry("ID-switches", self.id_switches) + "\n"
+ self.fp = self.fp / self.n_gt
+ self.fn = self.fn / self.n_gt
+ self.id_switches = self.id_switches / self.n_gt
+ summary += self.printEntry("False Positives Ratio", self.fp) + "\n"
+ #summary += self.printEntry("False Positives per Sequence", self.fps) + "\n"
+ summary += self.printEntry("False Negatives Ratio", self.fn) + "\n"
+ #summary += self.printEntry("False Negatives per Sequence", self.fns) + "\n"
+ summary += self.printEntry("Ignored False Negatives Ratio",
+ self.ifn) + "\n"
+
+ #summary += self.printEntry("Ignored False Negatives per Sequence", self.ifns) + "\n"
+ summary += self.printEntry("Missed Targets", self.fn) + "\n"
+ summary += self.printEntry("ID-switches", self.id_switches) + "\n"
+ summary += self.printEntry("Fragmentations", self.fragments) + "\n"
+ summary += "\n"
+ summary += self.printEntry("Ground Truth Objects (Total)", self.n_gt +
+ self.n_igt) + "\n"
+ #summary += self.printEntry("Ground Truth Objects (Total) per Sequence", self.n_gts) + "\n"
+ summary += self.printEntry("Ignored Ground Truth Objects",
+ self.n_igt) + "\n"
+ #summary += self.printEntry("Ignored Ground Truth Objects per Sequence", self.n_igts) + "\n"
+ summary += self.printEntry("Ground Truth Trajectories",
+ self.n_gt_trajectories) + "\n"
+ summary += "\n"
+ summary += self.printEntry("Tracker Objects (Total)", self.n_tr) + "\n"
+ #summary += self.printEntry("Tracker Objects (Total) per Sequence", self.n_trs) + "\n"
+ summary += self.printEntry("Ignored Tracker Objects", self.n_itr) + "\n"
+ #summary += self.printEntry("Ignored Tracker Objects per Sequence", self.n_itrs) + "\n"
+ summary += self.printEntry("Tracker Trajectories",
+ self.n_tr_trajectories) + "\n"
+ #summary += "\n"
+ #summary += self.printEntry("Ignored Tracker Objects with Associated Ignored Ground Truth Objects", self.n_igttr) + "\n"
+ summary += "=" * 80
+ return summary
+
+ def printEntry(self, key, val, width=(70, 10)):
+ """
+ Pretty print an entry in a table fashion.
+ """
+ s_out = key.ljust(width[0])
+ if type(val) == int:
+ s = "%%%dd" % width[1]
+ s_out += s % val
+ elif type(val) == float:
+ s = "%%%df" % (width[1])
+ s_out += s % val
+ else:
+ s_out += ("%s" % val).rjust(width[1])
+ return s_out
+
+ def saveToStats(self, save_summary):
+ """
+        Save the statistics in a whitespace-separated file.
+ """
+ summary = self.createSummary()
+ if save_summary:
+ filename = os.path.join(self.result_path,
+ "summary_%s.txt" % self.cls)
+ dump = open(filename, "w+")
+ dump.write(summary)
+ dump.close()
+ return summary
+
+
+class KITTIMOTMetric(Metric):
+ def __init__(self, save_summary=True):
+ self.save_summary = save_summary
+ self.MOTEvaluator = KITTIEvaluation
+ self.result_root = None
+ self.reset()
+
+ def reset(self):
+ self.seqs = []
+ self.n_sequences = 0
+ self.n_frames = []
+ self.strsummary = ''
+
+ def update(self, data_root, seq, data_type, result_root, result_filename):
+        assert data_type == 'kitti', "data_type should be 'kitti'"
+ self.result_root = result_root
+ self.gt_path = data_root
+ gt_path = '{}/../labels/{}.txt'.format(data_root, seq)
+ gt = open(gt_path, "r")
+ max_frame = 0
+ for line in gt:
+ line = line.strip()
+ line_list = line.split(" ")
+ if int(line_list[0]) > max_frame:
+ max_frame = int(line_list[0])
+ rs = open(result_filename, "r")
+ for line in rs:
+ line = line.strip()
+ line_list = line.split(" ")
+ if int(line_list[0]) > max_frame:
+ max_frame = int(line_list[0])
+ gt.close()
+ rs.close()
+ self.n_frames.append(max_frame + 1)
+ self.seqs.append(seq)
+ self.n_sequences += 1
+
+ def accumulate(self):
+ logger.info("Processing Result for KITTI Tracking Benchmark")
+ e = self.MOTEvaluator(result_path=self.result_root, gt_path=self.gt_path,\
+ n_frames=self.n_frames, seqs=self.seqs, n_sequences=self.n_sequences)
+ try:
+ if not e.loadTracker():
+ return
+ logger.info("Loading Results - Success")
+ logger.info("Evaluate Object Class: %s" % c.upper())
+ except:
+ logger.info("Caught exception while loading result data.")
+ if not e.loadGroundtruth():
+ raise ValueError("Ground truth not found.")
+ logger.info("Loading Groundtruth - Success")
+ # sanity checks
+        if len(e.groundtruth) != len(e.tracker):
+ logger.info(
+ "The uploaded data does not provide results for every sequence.")
+ return False
+ logger.info("Loaded %d Sequences." % len(e.groundtruth))
+ logger.info("Start Evaluation...")
+
+ if e.compute3rdPartyMetrics():
+ self.strsummary = e.saveToStats(self.save_summary)
+ else:
+ logger.info(
+ "There seem to be no true positives or false positives at all in the submitted data."
+ )
+
+ def log(self):
+ print(self.strsummary)
+
+ def get_results(self):
+ return self.strsummary
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/munkres.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/munkres.py
new file mode 100644
index 000000000..fbd4a92d2
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/munkres.py
@@ -0,0 +1,428 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is borrowed from https://github.com/xingyizhou/CenterTrack/blob/master/src/tools/eval_kitti_track/munkres.py
+"""
+
+import copy
+import sys
+
+__all__ = ['Munkres', 'make_cost_matrix']
+
+
+class Munkres:
+ """
+ Calculate the Munkres solution to the classical assignment problem.
+ See the module documentation for usage.
+ """
+
+ def __init__(self):
+ """Create a new instance"""
+ self.C = None
+ self.row_covered = []
+ self.col_covered = []
+ self.n = 0
+ self.Z0_r = 0
+ self.Z0_c = 0
+ self.marked = None
+ self.path = None
+
+ def make_cost_matrix(profit_matrix, inversion_function):
+ """
+ **DEPRECATED**
+
+ Please use the module function ``make_cost_matrix()``.
+ """
+ import munkres
+ return munkres.make_cost_matrix(profit_matrix, inversion_function)
+
+ make_cost_matrix = staticmethod(make_cost_matrix)
+
+ def pad_matrix(self, matrix, pad_value=0):
+ """
+ Pad a possibly non-square matrix to make it square.
+
+ :Parameters:
+ matrix : list of lists
+ matrix to pad
+
+ pad_value : int
+ value to use to pad the matrix
+
+ :rtype: list of lists
+ :return: a new, possibly padded, matrix
+ """
+ max_columns = 0
+ total_rows = len(matrix)
+
+ for row in matrix:
+ max_columns = max(max_columns, len(row))
+
+ total_rows = max(max_columns, total_rows)
+
+ new_matrix = []
+ for row in matrix:
+ row_len = len(row)
+ new_row = row[:]
+ if total_rows > row_len:
+ # Row too short. Pad it.
+                new_row += [pad_value] * (total_rows - row_len)
+ new_matrix += [new_row]
+
+ while len(new_matrix) < total_rows:
+            new_matrix += [[pad_value] * total_rows]
+
+ return new_matrix
+
+ def compute(self, cost_matrix):
+ """
+        Compute the indexes for the lowest-cost pairings between rows and
+        columns in the matrix. Returns a list of (row, column) tuples
+ that can be used to traverse the matrix.
+
+ :Parameters:
+ cost_matrix : list of lists
+ The cost matrix. If this cost matrix is not square, it
+ will be padded with zeros, via a call to ``pad_matrix()``.
+ (This method does *not* modify the caller's matrix. It
+ operates on a copy of the matrix.)
+
+ **WARNING**: This code handles square and rectangular
+ matrices. It does *not* handle irregular matrices.
+
+ :rtype: list
+ :return: A list of ``(row, column)`` tuples that describe the lowest
+ cost path through the matrix
+
+ """
+ self.C = self.pad_matrix(cost_matrix)
+ self.n = len(self.C)
+ self.original_length = len(cost_matrix)
+ self.original_width = len(cost_matrix[0])
+ self.row_covered = [False for i in range(self.n)]
+ self.col_covered = [False for i in range(self.n)]
+ self.Z0_r = 0
+ self.Z0_c = 0
+ self.path = self.__make_matrix(self.n * 2, 0)
+ self.marked = self.__make_matrix(self.n, 0)
+
+ done = False
+ step = 1
+
+ steps = {
+ 1: self.__step1,
+ 2: self.__step2,
+ 3: self.__step3,
+ 4: self.__step4,
+ 5: self.__step5,
+ 6: self.__step6
+ }
+
+ while not done:
+ try:
+ func = steps[step]
+ step = func()
+ except KeyError:
+ done = True
+
+ # Look for the starred columns
+ results = []
+ for i in range(self.original_length):
+ for j in range(self.original_width):
+ if self.marked[i][j] == 1:
+ results += [(i, j)]
+
+ return results
+
+ def __copy_matrix(self, matrix):
+ """Return an exact copy of the supplied matrix"""
+ return copy.deepcopy(matrix)
+
+ def __make_matrix(self, n, val):
+ """Create an *n*x*n* matrix, populating it with the specific value."""
+ matrix = []
+ for i in range(n):
+ matrix += [[val for j in range(n)]]
+ return matrix
+
+ def __step1(self):
+ """
+ For each row of the matrix, find the smallest element and
+ subtract it from every element in its row. Go to Step 2.
+ """
+ C = self.C
+ n = self.n
+ for i in range(n):
+ minval = min(self.C[i])
+ # Find the minimum value for this row and subtract that minimum
+ # from every element in the row.
+ for j in range(n):
+ self.C[i][j] -= minval
+
+ return 2
+
+ def __step2(self):
+ """
+ Find a zero (Z) in the resulting matrix. If there is no starred
+ zero in its row or column, star Z. Repeat for each element in the
+ matrix. Go to Step 3.
+ """
+ n = self.n
+ for i in range(n):
+ for j in range(n):
+ if (self.C[i][j] == 0) and \
+ (not self.col_covered[j]) and \
+ (not self.row_covered[i]):
+ self.marked[i][j] = 1
+ self.col_covered[j] = True
+ self.row_covered[i] = True
+
+ self.__clear_covers()
+ return 3
+
+ def __step3(self):
+ """
+ Cover each column containing a starred zero. If K columns are
+ covered, the starred zeros describe a complete set of unique
+ assignments. In this case, Go to DONE, otherwise, Go to Step 4.
+ """
+ n = self.n
+ count = 0
+ for i in range(n):
+ for j in range(n):
+ if self.marked[i][j] == 1:
+ self.col_covered[j] = True
+ count += 1
+
+ if count >= n:
+ step = 7 # done
+ else:
+ step = 4
+
+ return step
+
+ def __step4(self):
+ """
+ Find a noncovered zero and prime it. If there is no starred zero
+ in the row containing this primed zero, Go to Step 5. Otherwise,
+ cover this row and uncover the column containing the starred
+ zero. Continue in this manner until there are no uncovered zeros
+ left. Save the smallest uncovered value and Go to Step 6.
+ """
+ step = 0
+ done = False
+ row = -1
+ col = -1
+ star_col = -1
+ while not done:
+ (row, col) = self.__find_a_zero()
+ if row < 0:
+ done = True
+ step = 6
+ else:
+ self.marked[row][col] = 2
+ star_col = self.__find_star_in_row(row)
+ if star_col >= 0:
+ col = star_col
+ self.row_covered[row] = True
+ self.col_covered[col] = False
+ else:
+ done = True
+ self.Z0_r = row
+ self.Z0_c = col
+ step = 5
+
+ return step
+
+ def __step5(self):
+ """
+ Construct a series of alternating primed and starred zeros as
+ follows. Let Z0 represent the uncovered primed zero found in Step 4.
+ Let Z1 denote the starred zero in the column of Z0 (if any).
+ Let Z2 denote the primed zero in the row of Z1 (there will always
+ be one). Continue until the series terminates at a primed zero
+ that has no starred zero in its column. Unstar each starred zero
+ of the series, star each primed zero of the series, erase all
+ primes and uncover every line in the matrix. Return to Step 3
+ """
+ count = 0
+ path = self.path
+ path[count][0] = self.Z0_r
+ path[count][1] = self.Z0_c
+ done = False
+ while not done:
+ row = self.__find_star_in_col(path[count][1])
+ if row >= 0:
+ count += 1
+ path[count][0] = row
+ path[count][1] = path[count - 1][1]
+ else:
+ done = True
+
+ if not done:
+ col = self.__find_prime_in_row(path[count][0])
+ count += 1
+ path[count][0] = path[count - 1][0]
+ path[count][1] = col
+
+ self.__convert_path(path, count)
+ self.__clear_covers()
+ self.__erase_primes()
+ return 3
+
+ def __step6(self):
+ """
+ Add the value found in Step 4 to every element of each covered
+ row, and subtract it from every element of each uncovered column.
+ Return to Step 4 without altering any stars, primes, or covered
+ lines.
+ """
+ minval = self.__find_smallest()
+ for i in range(self.n):
+ for j in range(self.n):
+ if self.row_covered[i]:
+ self.C[i][j] += minval
+ if not self.col_covered[j]:
+ self.C[i][j] -= minval
+ return 4
+
+ def __find_smallest(self):
+ """Find the smallest uncovered value in the matrix."""
+        minval = 2e9  # large sentinel (stand-in for sys.maxsize)
+ for i in range(self.n):
+ for j in range(self.n):
+ if (not self.row_covered[i]) and (not self.col_covered[j]):
+ if minval > self.C[i][j]:
+ minval = self.C[i][j]
+ return minval
+
+ def __find_a_zero(self):
+ """Find the first uncovered element with value 0"""
+ row = -1
+ col = -1
+ i = 0
+ n = self.n
+ done = False
+
+ while not done:
+ j = 0
+ while True:
+ if (self.C[i][j] == 0) and \
+ (not self.row_covered[i]) and \
+ (not self.col_covered[j]):
+ row = i
+ col = j
+ done = True
+ j += 1
+ if j >= n:
+ break
+ i += 1
+ if i >= n:
+ done = True
+
+ return (row, col)
+
+ def __find_star_in_row(self, row):
+ """
+ Find the first starred element in the specified row. Returns
+ the column index, or -1 if no starred element was found.
+ """
+ col = -1
+ for j in range(self.n):
+ if self.marked[row][j] == 1:
+ col = j
+ break
+
+ return col
+
+ def __find_star_in_col(self, col):
+ """
+        Find the first starred element in the specified column. Returns
+ the row index, or -1 if no starred element was found.
+ """
+ row = -1
+ for i in range(self.n):
+ if self.marked[i][col] == 1:
+ row = i
+ break
+
+ return row
+
+ def __find_prime_in_row(self, row):
+ """
+ Find the first prime element in the specified row. Returns
+        the column index, or -1 if no prime element was found.
+ """
+ col = -1
+ for j in range(self.n):
+ if self.marked[row][j] == 2:
+ col = j
+ break
+
+ return col
+
+ def __convert_path(self, path, count):
+ for i in range(count + 1):
+ if self.marked[path[i][0]][path[i][1]] == 1:
+ self.marked[path[i][0]][path[i][1]] = 0
+ else:
+ self.marked[path[i][0]][path[i][1]] = 1
+
+ def __clear_covers(self):
+ """Clear all covered matrix cells"""
+ for i in range(self.n):
+ self.row_covered[i] = False
+ self.col_covered[i] = False
+
+ def __erase_primes(self):
+ """Erase all prime markings"""
+ for i in range(self.n):
+ for j in range(self.n):
+ if self.marked[i][j] == 2:
+ self.marked[i][j] = 0
+
+
+def make_cost_matrix(profit_matrix, inversion_function):
+ """
+ Create a cost matrix from a profit matrix by calling
+ 'inversion_function' to invert each value. The inversion
+ function must take one numeric argument (of any type) and return
+ another numeric argument which is presumed to be the cost inverse
+ of the original profit.
+
+ This is a static method. Call it like this:
+
+ .. python::
+
+ cost_matrix = Munkres.make_cost_matrix(matrix, inversion_func)
+
+ For example:
+
+ .. python::
+
+        cost_matrix = Munkres.make_cost_matrix(matrix, lambda x : sys.maxsize - x)
+
+ :Parameters:
+ profit_matrix : list of lists
+ The matrix to convert from a profit to a cost matrix
+
+ inversion_function : function
+ The function to use to invert each entry in the profit matrix
+
+ :rtype: list of lists
+ :return: The converted matrix
+ """
+ cost_matrix = []
+ for row in profit_matrix:
+ cost_matrix.append([inversion_function(value) for value in row])
+ return cost_matrix
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/widerface_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/widerface_utils.py
new file mode 100644
index 000000000..2f64bf6d5
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/metrics/widerface_utils.py
@@ -0,0 +1,391 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import cv2
+import numpy as np
+from collections import OrderedDict
+
+import paddle
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = ['face_eval_run', 'lmk2out']
+
+
+def face_eval_run(model,
+ image_dir,
+ gt_file,
+ pred_dir='output/pred',
+ eval_mode='widerface',
+ multi_scale=False):
+ # load ground truth files
+ with open(gt_file, 'r') as f:
+ gt_lines = f.readlines()
+ imid2path = []
+ pos_gt = 0
+ while pos_gt < len(gt_lines):
+ name_gt = gt_lines[pos_gt].strip('\n\t').split()[0]
+ imid2path.append(name_gt)
+ pos_gt += 1
+ n_gt = int(gt_lines[pos_gt].strip('\n\t').split()[0])
+ pos_gt += 1 + n_gt
+    logger.info('The ground truth file contains {} images'.format(len(imid2path)))
+
+ dets_dist = OrderedDict()
+ for iter_id, im_path in enumerate(imid2path):
+ image_path = os.path.join(image_dir, im_path)
+ if eval_mode == 'fddb':
+ image_path += '.jpg'
+ assert os.path.exists(image_path)
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ if multi_scale:
+ shrink, max_shrink = get_shrink(image.shape[0], image.shape[1])
+ det0 = detect_face(model, image, shrink)
+ det1 = flip_test(model, image, shrink)
+ [det2, det3] = multi_scale_test(model, image, max_shrink)
+ det4 = multi_scale_test_pyramid(model, image, max_shrink)
+ det = np.row_stack((det0, det1, det2, det3, det4))
+ dets = bbox_vote(det)
+ else:
+ dets = detect_face(model, image, 1)
+ if eval_mode == 'widerface':
+ save_widerface_bboxes(image_path, dets, pred_dir)
+ else:
+ dets_dist[im_path] = dets
+ if iter_id % 100 == 0:
+ logger.info('Test iter {}'.format(iter_id))
+ if eval_mode == 'fddb':
+ save_fddb_bboxes(dets_dist, pred_dir)
+ logger.info("Finish evaluation.")
+
+
+def detect_face(model, image, shrink):
+ image_shape = [image.shape[0], image.shape[1]]
+ if shrink != 1:
+ h, w = int(image_shape[0] * shrink), int(image_shape[1] * shrink)
+ image = cv2.resize(image, (w, h))
+ image_shape = [h, w]
+
+ img = face_img_process(image)
+ image_shape = np.asarray([image_shape])
+ scale_factor = np.asarray([[shrink, shrink]])
+ data = {
+ "image": paddle.to_tensor(
+ img, dtype='float32'),
+ "im_shape": paddle.to_tensor(
+ image_shape, dtype='float32'),
+ "scale_factor": paddle.to_tensor(
+ scale_factor, dtype='float32')
+ }
+ model.eval()
+ detection = model(data)
+ detection = detection['bbox'].numpy()
+    # layout: xmin, ymin, xmax, ymax, score
+ if np.prod(detection.shape) == 1:
+ logger.info("No face detected")
+ return np.array([[0, 0, 0, 0, 0]])
+ det_conf = detection[:, 1]
+ det_xmin = detection[:, 2]
+ det_ymin = detection[:, 3]
+ det_xmax = detection[:, 4]
+ det_ymax = detection[:, 5]
+
+ det = np.column_stack((det_xmin, det_ymin, det_xmax, det_ymax, det_conf))
+ return det
+
+
+def flip_test(model, image, shrink):
+ img = cv2.flip(image, 1)
+ det_f = detect_face(model, img, shrink)
+ det_t = np.zeros(det_f.shape)
+ img_width = image.shape[1]
+ det_t[:, 0] = img_width - det_f[:, 2]
+ det_t[:, 1] = det_f[:, 1]
+ det_t[:, 2] = img_width - det_f[:, 0]
+ det_t[:, 3] = det_f[:, 3]
+ det_t[:, 4] = det_f[:, 4]
+ return det_t
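+# e.g. in a hypothetical 100 px wide image, a detection spanning
+# x = [10, 30] on the flipped image maps back to
+# [100 - 30, 100 - 10] = [70, 90] in the original image.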
+
+
+def multi_scale_test(model, image, max_shrink):
+    # Shrunk images are only used to detect big faces
+ st = 0.5 if max_shrink >= 0.75 else 0.5 * max_shrink
+ det_s = detect_face(model, image, st)
+ index = np.where(
+ np.maximum(det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1)
+ > 30)[0]
+ det_s = det_s[index, :]
+    # Enlarge the image once (up to 2x)
+ bt = min(2, max_shrink) if max_shrink > 1 else (st + max_shrink) / 2
+ det_b = detect_face(model, image, bt)
+
+    # Enlarge the image several times for small faces
+ if max_shrink > 2:
+ bt *= 2
+ while bt < max_shrink:
+ det_b = np.row_stack((det_b, detect_face(model, image, bt)))
+ bt *= 2
+ det_b = np.row_stack((det_b, detect_face(model, image, max_shrink)))
+
+ # Enlarged images are only used to detect small faces.
+ if bt > 1:
+ index = np.where(
+ np.minimum(det_b[:, 2] - det_b[:, 0] + 1,
+ det_b[:, 3] - det_b[:, 1] + 1) < 100)[0]
+ det_b = det_b[index, :]
+    # Shrunken images are only used to detect big faces.
+ else:
+ index = np.where(
+ np.maximum(det_b[:, 2] - det_b[:, 0] + 1,
+ det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]
+ det_b = det_b[index, :]
+ return det_s, det_b
+
+
+def multi_scale_test_pyramid(model, image, max_shrink):
+ # Use image pyramids to detect faces
+ det_b = detect_face(model, image, 0.25)
+ index = np.where(
+ np.maximum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1)
+ > 30)[0]
+ det_b = det_b[index, :]
+
+ st = [0.75, 1.25, 1.5, 1.75]
+ for i in range(len(st)):
+ if st[i] <= max_shrink:
+ det_temp = detect_face(model, image, st[i])
+ # Enlarged images are only used to detect small faces.
+ if st[i] > 1:
+ index = np.where(
+ np.minimum(det_temp[:, 2] - det_temp[:, 0] + 1,
+ det_temp[:, 3] - det_temp[:, 1] + 1) < 100)[0]
+ det_temp = det_temp[index, :]
+            # Shrunken images are only used to detect big faces.
+ else:
+ index = np.where(
+ np.maximum(det_temp[:, 2] - det_temp[:, 0] + 1,
+ det_temp[:, 3] - det_temp[:, 1] + 1) > 30)[0]
+ det_temp = det_temp[index, :]
+ det_b = np.row_stack((det_b, det_temp))
+ return det_b
+
+
+def to_chw(image):
+ """
+ Transpose image from HWC to CHW.
+ Args:
+ image (np.array): an image with HWC layout.
+ """
+ # HWC to CHW
+ if len(image.shape) == 3:
+ image = np.swapaxes(image, 1, 2)
+ image = np.swapaxes(image, 1, 0)
+ return image
+
+
+def face_img_process(image,
+ mean=[104., 117., 123.],
+ std=[127.502231, 127.502231, 127.502231]):
+ img = np.array(image)
+ img = to_chw(img)
+ img = img.astype('float32')
+ img -= np.array(mean)[:, np.newaxis, np.newaxis].astype('float32')
+ img /= np.array(std)[:, np.newaxis, np.newaxis].astype('float32')
+ img = [img]
+ img = np.array(img)
+ return img
+
+
+def get_shrink(height, width):
+ """
+ Args:
+ height (int): image height.
+ width (int): image width.
+ """
+ # avoid out of memory
+ max_shrink_v1 = (0x7fffffff / 577.0 / (height * width))**0.5
+ max_shrink_v2 = ((678 * 1024 * 2.0 * 2.0) / (height * width))**0.5
+
+    def get_round(x, loc):
+        # truncate x to `loc` decimal places; fall back to x unchanged when
+        # there is nothing to truncate (the original returned None in the
+        # rare case that str(x) contains no '.')
+        str_x = str(x)
+        if '.' in str_x:
+            str_before, str_after = str_x.split('.')
+            if len(str_after) >= 3:
+                return float(str_before + '.' + str_after[0:loc])
+        return x
+
+ max_shrink = get_round(min(max_shrink_v1, max_shrink_v2), 2) - 0.3
+ if max_shrink >= 1.5 and max_shrink < 2:
+ max_shrink = max_shrink - 0.1
+ elif max_shrink >= 2 and max_shrink < 3:
+ max_shrink = max_shrink - 0.2
+ elif max_shrink >= 3 and max_shrink < 4:
+ max_shrink = max_shrink - 0.3
+ elif max_shrink >= 4 and max_shrink < 5:
+ max_shrink = max_shrink - 0.4
+ elif max_shrink >= 5:
+ max_shrink = max_shrink - 0.5
+ elif max_shrink <= 0.1:
+ max_shrink = 0.1
+
+ shrink = max_shrink if max_shrink < 1 else 1
+ return shrink, max_shrink
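+# Worked example: a 1080x1920 image gives max_shrink_v1 ~= 1.34 and
+# max_shrink_v2 ~= 1.16; get_round truncates min(...) to 1.15, and
+# subtracting 0.3 yields max_shrink = 0.85, so shrink = 0.85 (the image
+# is shrunk to 85% of its size before detection).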
+
+
+def bbox_vote(det):
+ order = det[:, 4].ravel().argsort()[::-1]
+ det = det[order, :]
+ if det.shape[0] == 0:
+ dets = np.array([[10, 10, 20, 20, 0.002]])
+ det = np.empty(shape=[0, 5])
+ while det.shape[0] > 0:
+ # IOU
+ area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)
+ xx1 = np.maximum(det[0, 0], det[:, 0])
+ yy1 = np.maximum(det[0, 1], det[:, 1])
+ xx2 = np.minimum(det[0, 2], det[:, 2])
+ yy2 = np.minimum(det[0, 3], det[:, 3])
+ w = np.maximum(0.0, xx2 - xx1 + 1)
+ h = np.maximum(0.0, yy2 - yy1 + 1)
+ inter = w * h
+ o = inter / (area[0] + area[:] - inter)
+
+ # nms
+ merge_index = np.where(o >= 0.3)[0]
+ det_accu = det[merge_index, :]
+ det = np.delete(det, merge_index, 0)
+ if merge_index.shape[0] <= 1:
+ if det.shape[0] == 0:
+ try:
+ dets = np.row_stack((dets, det_accu))
+ except:
+ dets = det_accu
+ continue
+ det_accu[:, 0:4] = det_accu[:, 0:4] * np.tile(det_accu[:, -1:], (1, 4))
+ max_score = np.max(det_accu[:, 4])
+ det_accu_sum = np.zeros((1, 5))
+ det_accu_sum[:, 0:4] = np.sum(det_accu[:, 0:4],
+ axis=0) / np.sum(det_accu[:, -1:])
+ det_accu_sum[:, 4] = max_score
+ try:
+ dets = np.row_stack((dets, det_accu_sum))
+ except:
+ dets = det_accu_sum
+ dets = dets[0:750, :]
+ keep_index = np.where(dets[:, 4] >= 0.01)[0]
+ dets = dets[keep_index, :]
+ return dets
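+# Voting sketch with hypothetical boxes: two detections of the same face,
+# [0, 0, 10, 10, 0.9] and [1, 1, 11, 11, 0.6], overlap with IoU ~= 0.7
+# (>= 0.3), so they are merged into one box whose coordinates are the
+# score-weighted average and whose score is max(0.9, 0.6) = 0.9.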
+
+
+def save_widerface_bboxes(image_path, bboxes_scores, output_dir):
+ image_name = image_path.split('/')[-1]
+ image_class = image_path.split('/')[-2]
+ odir = os.path.join(output_dir, image_class)
+ if not os.path.exists(odir):
+ os.makedirs(odir)
+
+ ofname = os.path.join(odir, '%s.txt' % (image_name[:-4]))
+ f = open(ofname, 'w')
+ f.write('{:s}\n'.format(image_class + '/' + image_name))
+ f.write('{:d}\n'.format(bboxes_scores.shape[0]))
+ for box_score in bboxes_scores:
+ xmin, ymin, xmax, ymax, score = box_score
+ f.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\n'.format(xmin, ymin, (
+ xmax - xmin + 1), (ymax - ymin + 1), score))
+ f.close()
+ logger.info("The predicted result is saved as {}".format(ofname))
+
+
+def save_fddb_bboxes(bboxes_scores,
+ output_dir,
+ output_fname='pred_fddb_res.txt'):
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ predict_file = os.path.join(output_dir, output_fname)
+ f = open(predict_file, 'w')
+    for image_path, dets in bboxes_scores.items():
+ f.write('{:s}\n'.format(image_path))
+ f.write('{:d}\n'.format(dets.shape[0]))
+ for box_score in dets:
+ xmin, ymin, xmax, ymax, score = box_score
+ width, height = xmax - xmin, ymax - ymin
+ f.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\n'
+ .format(xmin, ymin, width, height, score))
+ logger.info("The predicted result is saved as {}".format(predict_file))
+ return predict_file
+
+
+def lmk2out(results, is_bbox_normalized=False):
+ """
+ Args:
+        results: a list of dicts; each dict should include `landmark` and
+            `im_id`, and `im_shape` as well if is_bbox_normalized=True.
+        is_bbox_normalized: whether or not the landmarks are normalized.
+ """
+ xywh_res = []
+ for t in results:
+ bboxes = t['bbox'][0]
+ lengths = t['bbox'][1][0]
+ im_ids = np.array(t['im_id'][0]).flatten()
+        if bboxes is None or bboxes.shape == (1, 1):
+ continue
+ face_index = t['face_index'][0]
+ prior_box = t['prior_boxes'][0]
+ predict_lmk = t['landmark'][0]
+ prior = np.reshape(prior_box, (-1, 4))
+ predictlmk = np.reshape(predict_lmk, (-1, 10))
+
+ k = 0
+ for a in range(len(lengths)):
+ num = lengths[a]
+ im_id = int(im_ids[a])
+ for i in range(num):
+ score = bboxes[k][1]
+ theindex = face_index[i][0]
+ me_prior = prior[theindex, :]
+ lmk_pred = predictlmk[theindex, :]
+ prior_w = me_prior[2] - me_prior[0]
+ prior_h = me_prior[3] - me_prior[1]
+ prior_w_center = (me_prior[2] + me_prior[0]) / 2
+ prior_h_center = (me_prior[3] + me_prior[1]) / 2
+ lmk_decode = np.zeros((10))
+ for j in [0, 2, 4, 6, 8]:
+ lmk_decode[j] = lmk_pred[j] * 0.1 * prior_w + prior_w_center
+ for j in [1, 3, 5, 7, 9]:
+ lmk_decode[j] = lmk_pred[j] * 0.1 * prior_h + prior_h_center
+ im_shape = t['im_shape'][0][a].tolist()
+ image_h, image_w = int(im_shape[0]), int(im_shape[1])
+ if is_bbox_normalized:
+ lmk_decode = lmk_decode * np.array([
+ image_w, image_h, image_w, image_h, image_w, image_h,
+ image_w, image_h, image_w, image_h
+ ])
+ lmk_res = {
+ 'image_id': im_id,
+ 'landmark': lmk_decode,
+ 'score': score,
+ }
+ xywh_res.append(lmk_res)
+ k += 1
+ return xywh_res
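+# Decoding sketch: each landmark offset is scaled by 0.1 times the prior
+# box size and shifted to the prior box center; e.g. a hypothetical offset
+# of 0.5 with a 40 px wide prior centered at x = 100 decodes to
+# 0.5 * 0.1 * 40 + 100 = 102.0.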
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/.gitignore b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/.gitignore
new file mode 100644
index 000000000..f296851d6
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/.gitignore
@@ -0,0 +1 @@
+MODEL_ZOO
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/__init__.py
new file mode 100644
index 000000000..6db6eb6c6
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import model_zoo
+from .model_zoo import *
+
+__all__ = model_zoo.__all__
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..dccbb6902
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/__pycache__/model_zoo.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/__pycache__/model_zoo.cpython-37.pyc
new file mode 100644
index 000000000..dd8c6c51a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/__pycache__/model_zoo.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/model_zoo.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/model_zoo.py
new file mode 100644
index 000000000..27581ef79
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/model_zoo.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path as osp
+import pkg_resources
+
+try:
+ from collections.abc import Sequence
+except:
+ from collections import Sequence
+
+from ppdet.core.workspace import load_config, create
+from ppdet.utils.checkpoint import load_weight
+from ppdet.utils.download import get_config_path
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = [
+ 'list_model', 'get_config_file', 'get_weights_url', 'get_model',
+ 'MODEL_ZOO_FILENAME'
+]
+
+MODEL_ZOO_FILENAME = 'MODEL_ZOO'
+
+
+def list_model(filters=[]):
+ model_zoo_file = pkg_resources.resource_filename('ppdet.model_zoo',
+ MODEL_ZOO_FILENAME)
+ with open(model_zoo_file) as f:
+ model_names = f.read().splitlines()
+
+ # filter model_name
+ def filt(name):
+ for f in filters:
+ if name.find(f) < 0:
+ return False
+ return True
+
+ if isinstance(filters, str) or not isinstance(filters, Sequence):
+ filters = [filters]
+ model_names = [name for name in model_names if filt(name)]
+ if len(model_names) == 0 and len(filters) > 0:
+ raise ValueError("no model found, please check filters seeting, "
+ "filters can be set as following kinds:\n"
+ "\tDataset: coco, voc ...\n"
+ "\tArchitecture: yolo, rcnn, ssd ...\n"
+ "\tBackbone: resnet, vgg, darknet ...\n")
+
+ model_str = "Available Models:\n"
+ for model_name in model_names:
+ model_str += "\t{}\n".format(model_name)
+ logger.info(model_str)
+
+
+# models and configs save on bcebos under dygraph directory
+def get_config_file(model_name):
+ return get_config_path("ppdet://configs/{}.yml".format(model_name))
+
+
+def get_weights_url(model_name):
+ return "ppdet://models/{}.pdparams".format(osp.split(model_name)[-1])
+
+
+def get_model(model_name, pretrained=True):
+ cfg_file = get_config_file(model_name)
+ cfg = load_config(cfg_file)
+ model = create(cfg.architecture)
+
+ if pretrained:
+ load_weight(model, get_weights_url(model_name))
+
+ return model
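+
+# Usage sketch (the model name matches the one in the model_zoo unit tests):
+#   model = get_model('ppyolo/ppyolo_tiny_650e_coco', pretrained=True)
+#   model.eval()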
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/tests/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/tests/__init__.py
new file mode 100644
index 000000000..6f0ea8534
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/tests/test_get_model.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/tests/test_get_model.py
new file mode 100644
index 000000000..8887185e0
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/tests/test_get_model.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import paddle
+import ppdet
+import unittest
+
+# NOTE: weights downloading costs time, we choose
+# a small model for unittesting
+MODEL_NAME = 'ppyolo/ppyolo_tiny_650e_coco'
+
+
+class TestGetConfigFile(unittest.TestCase):
+ def test_main(self):
+ try:
+ cfg_file = ppdet.model_zoo.get_config_file(MODEL_NAME)
+ assert os.path.isfile(cfg_file)
+ except:
+ self.assertTrue(False)
+
+
+class TestGetModel(unittest.TestCase):
+ def test_main(self):
+ try:
+ model = ppdet.model_zoo.get_model(MODEL_NAME)
+ assert isinstance(model, paddle.nn.Layer)
+ except:
+ self.assertTrue(False)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/tests/test_list_model.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/tests/test_list_model.py
new file mode 100644
index 000000000..8f91afe00
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/model_zoo/tests/test_list_model.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import unittest
+import ppdet
+
+
+class TestListModel(unittest.TestCase):
+ def setUp(self):
+ self._filter = []
+
+ def test_main(self):
+ try:
+ ppdet.model_zoo.list_model(self._filter)
+ self.assertTrue(True)
+ except:
+ self.assertTrue(False)
+
+
+class TestListModelYOLO(TestListModel):
+ def setUp(self):
+ self._filter = ['yolo']
+
+
+class TestListModelRCNN(TestListModel):
+ def setUp(self):
+ self._filter = ['rcnn']
+
+
+class TestListModelSSD(TestListModel):
+ def setUp(self):
+ self._filter = ['ssd']
+
+
+class TestListModelMultiFilter(TestListModel):
+ def setUp(self):
+ self._filter = ['yolo', 'darknet']
+
+
+class TestListModelError(unittest.TestCase):
+ def setUp(self):
+ self._filter = ['xxx']
+
+ def test_main(self):
+ try:
+ ppdet.model_zoo.list_model(self._filter)
+ self.assertTrue(False)
+ except ValueError:
+ self.assertTrue(True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/__init__.py
new file mode 100644
index 000000000..cdcb5d1bf
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/__init__.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+warnings.filterwarnings(
+ action='ignore', category=DeprecationWarning, module='ops')
+
+from . import ops
+from . import backbones
+from . import necks
+from . import proposal_generator
+from . import heads
+from . import losses
+from . import architectures
+from . import post_process
+from . import layers
+from . import reid
+from . import mot
+from . import transformers
+from . import assigners
+
+from .ops import *
+from .backbones import *
+from .necks import *
+from .proposal_generator import *
+from .heads import *
+from .losses import *
+from .architectures import *
+from .post_process import *
+from .layers import *
+from .reid import *
+from .mot import *
+from .transformers import *
+from .assigners import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/__init__.py
new file mode 100644
index 000000000..b5feb06d8
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/__init__.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+from . import meta_arch
+from . import faster_rcnn
+from . import mask_rcnn
+from . import yolo
+from . import cascade_rcnn
+from . import ssd
+from . import fcos
+from . import solov2
+from . import ttfnet
+from . import s2anet
+from . import keypoint_hrhrnet
+from . import keypoint_hrnet
+from . import jde
+from . import deepsort
+from . import fairmot
+from . import centernet
+from . import blazeface
+from . import gfl
+from . import picodet
+from . import detr
+from . import sparse_rcnn
+from . import tood
+
+from .meta_arch import *
+from .faster_rcnn import *
+from .mask_rcnn import *
+from .yolo import *
+from .cascade_rcnn import *
+from .ssd import *
+from .fcos import *
+from .solov2 import *
+from .ttfnet import *
+from .s2anet import *
+from .keypoint_hrhrnet import *
+from .keypoint_hrnet import *
+from .jde import *
+from .deepsort import *
+from .fairmot import *
+from .centernet import *
+from .blazeface import *
+from .gfl import *
+from .picodet import *
+from .detr import *
+from .sparse_rcnn import *
+from .tood import *
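
Every architecture below is assembled through the `@register`/`create` pair from `ppdet.core.workspace`. A self-contained toy version of that pattern (hypothetical, far simpler than ppdet's real implementation) shows why `from_config` can build sub-modules from config keys:

```python
# Toy registry sketch: @register files a class under its name; create()
# instantiates a registered class by name with keyword overrides.
REGISTRY = {}

def register(cls):
    REGISTRY[cls.__name__] = cls
    return cls

def create(name, **kwargs):
    return REGISTRY[name](**kwargs)

@register
class FPN:
    def __init__(self, input_shape=None):
        self.out_shape = input_shape

neck = create('FPN', input_shape=[256, 512, 1024])
print(neck.out_shape)  # [256, 512, 1024]
```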
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/blazeface.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/blazeface.py
new file mode 100644
index 000000000..af6aa269d
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/blazeface.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['BlazeFace']
+
+
+@register
+class BlazeFace(BaseArch):
+ """
+ BlazeFace: Sub-millisecond Neural Face Detection on Mobile GPUs,
+ see https://arxiv.org/abs/1907.05047
+
+ Args:
+ backbone (nn.Layer): backbone instance
+ neck (nn.Layer): neck instance
+        blaze_head (nn.Layer): `BlazeHead` instance
+ post_process (object): `BBoxPostProcess` instance
+ """
+
+ __category__ = 'architecture'
+ __inject__ = ['post_process']
+
+ def __init__(self, backbone, blaze_head, neck, post_process):
+ super(BlazeFace, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.blaze_head = blaze_head
+ self.post_process = post_process
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ # backbone
+ backbone = create(cfg['backbone'])
+ # fpn
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = create(cfg['neck'], **kwargs)
+ # head
+ kwargs = {'input_shape': neck.out_shape}
+ blaze_head = create(cfg['blaze_head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ 'blaze_head': blaze_head,
+ }
+
+ def _forward(self):
+ # Backbone
+ body_feats = self.backbone(self.inputs)
+ # neck
+ neck_feats = self.neck(body_feats)
+ # blaze Head
+ if self.training:
+ return self.blaze_head(neck_feats, self.inputs['image'],
+ self.inputs['gt_bbox'],
+ self.inputs['gt_class'])
+ else:
+ preds, anchors = self.blaze_head(neck_feats, self.inputs['image'])
+ bbox, bbox_num = self.post_process(preds, anchors,
+ self.inputs['im_shape'],
+ self.inputs['scale_factor'])
+ return bbox, bbox_num
+
+    def get_loss(self):
+ return {"loss": self._forward()}
+
+ def get_pred(self):
+ bbox_pred, bbox_num = self._forward()
+ output = {
+ "bbox": bbox_pred,
+ "bbox_num": bbox_num,
+ }
+ return output
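
Note how `from_config` threads shapes down the pipeline: `backbone.out_shape` feeds the neck and `neck.out_shape` feeds the head. A toy sketch of that chaining (hypothetical `Stage` objects, not ppdet classes):

```python
class Stage:
    def __init__(self, input_shape, out_shape):
        self.input_shape, self.out_shape = input_shape, out_shape

backbone = Stage(None, ['C3', 'C4', 'C5'])
neck = Stage(backbone.out_shape, ['P3', 'P4', 'P5'])  # consumes backbone shapes
head = Stage(neck.out_shape, None)                    # consumes neck shapes
print(head.input_shape)  # ['P3', 'P4', 'P5']
```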
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/cascade_rcnn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/cascade_rcnn.py
new file mode 100644
index 000000000..ac29b775d
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/cascade_rcnn.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['CascadeRCNN']
+
+
+@register
+class CascadeRCNN(BaseArch):
+ """
+ Cascade R-CNN network, see https://arxiv.org/abs/1712.00726
+
+ Args:
+ backbone (object): backbone instance
+ rpn_head (object): `RPNHead` instance
+ bbox_head (object): `BBoxHead` instance
+ bbox_post_process (object): `BBoxPostProcess` instance
+ neck (object): 'FPN' instance
+ mask_head (object): `MaskHead` instance
+ mask_post_process (object): `MaskPostProcess` instance
+ """
+ __category__ = 'architecture'
+ __inject__ = [
+ 'bbox_post_process',
+ 'mask_post_process',
+ ]
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ bbox_head,
+ bbox_post_process,
+ neck=None,
+ mask_head=None,
+ mask_post_process=None):
+ super(CascadeRCNN, self).__init__()
+ self.backbone = backbone
+ self.rpn_head = rpn_head
+ self.bbox_head = bbox_head
+ self.bbox_post_process = bbox_post_process
+ self.neck = neck
+ self.mask_head = mask_head
+ self.mask_post_process = mask_post_process
+ self.with_mask = mask_head is not None
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = cfg['neck'] and create(cfg['neck'], **kwargs)
+
+ out_shape = neck and neck.out_shape or backbone.out_shape
+ kwargs = {'input_shape': out_shape}
+ rpn_head = create(cfg['rpn_head'], **kwargs)
+ bbox_head = create(cfg['bbox_head'], **kwargs)
+
+ out_shape = neck and out_shape or bbox_head.get_head().out_shape
+ kwargs = {'input_shape': out_shape}
+ mask_head = cfg['mask_head'] and create(cfg['mask_head'], **kwargs)
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "rpn_head": rpn_head,
+ "bbox_head": bbox_head,
+ "mask_head": mask_head,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ if self.neck is not None:
+ body_feats = self.neck(body_feats)
+
+ if self.training:
+ rois, rois_num, rpn_loss = self.rpn_head(body_feats, self.inputs)
+ bbox_loss, bbox_feat = self.bbox_head(body_feats, rois, rois_num,
+ self.inputs)
+ rois, rois_num = self.bbox_head.get_assigned_rois()
+ bbox_targets = self.bbox_head.get_assigned_targets()
+ if self.with_mask:
+ mask_loss = self.mask_head(body_feats, rois, rois_num,
+ self.inputs, bbox_targets, bbox_feat)
+ return rpn_loss, bbox_loss, mask_loss
+ else:
+ return rpn_loss, bbox_loss, {}
+ else:
+ rois, rois_num, _ = self.rpn_head(body_feats, self.inputs)
+ preds, _ = self.bbox_head(body_feats, rois, rois_num, self.inputs)
+ refined_rois = self.bbox_head.get_refined_rois()
+
+ im_shape = self.inputs['im_shape']
+ scale_factor = self.inputs['scale_factor']
+
+ bbox, bbox_num = self.bbox_post_process(
+ preds, (refined_rois, rois_num), im_shape, scale_factor)
+ # rescale the prediction back to origin image
+ bbox_pred = self.bbox_post_process.get_pred(bbox, bbox_num,
+ im_shape, scale_factor)
+ if not self.with_mask:
+ return bbox_pred, bbox_num, None
+ mask_out = self.mask_head(body_feats, bbox, bbox_num, self.inputs)
+ origin_shape = self.bbox_post_process.get_origin_shape()
+ mask_pred = self.mask_post_process(mask_out[:, 0, :, :], bbox_pred,
+ bbox_num, origin_shape)
+ return bbox_pred, bbox_num, mask_pred
+
+    def get_loss(self):
+ rpn_loss, bbox_loss, mask_loss = self._forward()
+ loss = {}
+ loss.update(rpn_loss)
+ loss.update(bbox_loss)
+ if self.with_mask:
+ loss.update(mask_loss)
+ total_loss = paddle.add_n(list(loss.values()))
+ loss.update({'loss': total_loss})
+ return loss
+
+ def get_pred(self):
+ bbox_pred, bbox_num, mask_pred = self._forward()
+ output = {
+ 'bbox': bbox_pred,
+ 'bbox_num': bbox_num,
+ }
+ if self.with_mask:
+ output.update({'mask': mask_pred})
+ return output
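
`from_config` above leans on the pre-ternary `x and x.attr or y` shorthand (`neck and neck.out_shape or backbone.out_shape`). A standalone demonstration of its semantics, and its one pitfall:

```python
class Module:
    def __init__(self, out_shape):
        self.out_shape = out_shape

backbone, neck = Module([2048]), None
print(neck and neck.out_shape or backbone.out_shape)  # [2048]: falls back
neck = Module([256])
print(neck and neck.out_shape or backbone.out_shape)  # [256]: neck wins
# Pitfall: a falsy out_shape (e.g. []) would also pick the fallback, which
# is why modern code prefers `neck.out_shape if neck else backbone.out_shape`.
```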
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/centernet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/centernet.py
new file mode 100644
index 000000000..2287d743b
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/centernet.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['CenterNet']
+
+
+@register
+class CenterNet(BaseArch):
+ """
+ CenterNet network, see http://arxiv.org/abs/1904.07850
+
+ Args:
+ backbone (object): backbone instance
+        neck (object): FPN instance, 'CenterNetDLAFPN' by default
+        head (object): 'CenterNetHead' instance
+        post_process (object): 'CenterNetPostProcess' instance
+        for_mot (bool): whether to return extra features used by the tracking model
+
+ """
+ __category__ = 'architecture'
+ __inject__ = ['post_process']
+ __shared__ = ['for_mot']
+
+ def __init__(self,
+ backbone,
+ neck='CenterNetDLAFPN',
+ head='CenterNetHead',
+ post_process='CenterNetPostProcess',
+ for_mot=False):
+ super(CenterNet, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.head = head
+ self.post_process = post_process
+ self.for_mot = for_mot
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = cfg['neck'] and create(cfg['neck'], **kwargs)
+
+ out_shape = neck and neck.out_shape or backbone.out_shape
+ kwargs = {'input_shape': out_shape}
+ head = create(cfg['head'], **kwargs)
+
+ return {'backbone': backbone, 'neck': neck, "head": head}
+
+ def _forward(self):
+ neck_feat = self.backbone(self.inputs)
+ if self.neck is not None:
+ neck_feat = self.neck(neck_feat)
+ head_out = self.head(neck_feat, self.inputs)
+ if self.for_mot:
+ head_out.update({'neck_feat': neck_feat})
+ elif self.training:
+ head_out['loss'] = head_out.pop('det_loss')
+ return head_out
+
+ def get_pred(self):
+ head_out = self._forward()
+ if self.for_mot:
+ bbox, bbox_inds, topk_clses = self.post_process(
+ head_out['heatmap'],
+ head_out['size'],
+ head_out['offset'],
+ im_shape=self.inputs['im_shape'],
+ scale_factor=self.inputs['scale_factor'])
+ output = {
+ "bbox": bbox,
+ "bbox_inds": bbox_inds,
+ "topk_clses": topk_clses,
+ "neck_feat": head_out['neck_feat']
+ }
+ else:
+ bbox, bbox_num, _ = self.post_process(
+ head_out['heatmap'],
+ head_out['size'],
+ head_out['offset'],
+ im_shape=self.inputs['im_shape'],
+ scale_factor=self.inputs['scale_factor'])
+ output = {
+ "bbox": bbox,
+ "bbox_num": bbox_num,
+ }
+ return output
+
+ def get_loss(self):
+ return self._forward()
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/deepsort.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/deepsort.py
new file mode 100644
index 000000000..066f7a4ce
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/deepsort.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+from ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_box
+
+__all__ = ['DeepSORT']
+
+
+@register
+class DeepSORT(BaseArch):
+ """
+ DeepSORT network, see https://arxiv.org/abs/1703.07402
+
+ Args:
+ detector (object): detector model instance
+ reid (object): reid model instance
+ tracker (object): tracker instance
+ """
+ __category__ = 'architecture'
+
+ def __init__(self,
+ detector='YOLOv3',
+ reid='PCBPyramid',
+ tracker='DeepSORTTracker'):
+ super(DeepSORT, self).__init__()
+ self.detector = detector
+ self.reid = reid
+ self.tracker = tracker
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ if cfg['detector'] != 'None':
+ detector = create(cfg['detector'])
+ else:
+ detector = None
+ reid = create(cfg['reid'])
+ tracker = create(cfg['tracker'])
+
+ return {
+ "detector": detector,
+ "reid": reid,
+ "tracker": tracker,
+ }
+
+ def _forward(self):
+ crops = self.inputs['crops']
+ features = self.reid(crops)
+ return features
+
+ def get_pred(self):
+ return self._forward()
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/detr.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/detr.py
new file mode 100644
index 000000000..2c081bf6c
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/detr.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from .meta_arch import BaseArch
+from ppdet.core.workspace import register, create
+
+__all__ = ['DETR']
+
+
+@register
+class DETR(BaseArch):
+ __category__ = 'architecture'
+ __inject__ = ['post_process']
+
+ def __init__(self,
+ backbone,
+ transformer,
+ detr_head,
+ post_process='DETRBBoxPostProcess'):
+ super(DETR, self).__init__()
+ self.backbone = backbone
+ self.transformer = transformer
+ self.detr_head = detr_head
+ self.post_process = post_process
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ # backbone
+ backbone = create(cfg['backbone'])
+ # transformer
+ kwargs = {'input_shape': backbone.out_shape}
+ transformer = create(cfg['transformer'], **kwargs)
+ # head
+ kwargs = {
+ 'hidden_dim': transformer.hidden_dim,
+ 'nhead': transformer.nhead,
+ 'input_shape': backbone.out_shape
+ }
+ detr_head = create(cfg['detr_head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'transformer': transformer,
+ "detr_head": detr_head,
+ }
+
+ def _forward(self):
+ # Backbone
+ body_feats = self.backbone(self.inputs)
+
+ # Transformer
+ out_transformer = self.transformer(body_feats, self.inputs['pad_mask'])
+
+ # DETR Head
+ if self.training:
+ return self.detr_head(out_transformer, body_feats, self.inputs)
+ else:
+ preds = self.detr_head(out_transformer, body_feats)
+ bbox, bbox_num = self.post_process(preds, self.inputs['im_shape'],
+ self.inputs['scale_factor'])
+ return bbox, bbox_num
+
+    def get_loss(self):
+ losses = self._forward()
+ losses.update({
+ 'loss':
+ paddle.add_n([v for k, v in losses.items() if 'log' not in k])
+ })
+ return losses
+
+ def get_pred(self):
+ bbox_pred, bbox_num = self._forward()
+ output = {
+ "bbox": bbox_pred,
+ "bbox_num": bbox_num,
+ }
+ return output
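
`DETR.get_loss` folds every entry whose key lacks the substring 'log' into one scalar `loss`. A minimal sketch of that reduction (made-up values; `paddle.add_n` sums a list of tensors):

```python
import paddle

losses = {
    'loss_class': paddle.to_tensor(0.5),
    'loss_bbox': paddle.to_tensor(1.2),
    'log_stats': paddle.to_tensor(9.9),  # monitoring only, excluded
}
losses['loss'] = paddle.add_n(
    [v for k, v in losses.items() if 'log' not in k])
print(float(losses['loss']))  # 1.7
```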
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/fairmot.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/fairmot.py
new file mode 100644
index 000000000..271450839
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/fairmot.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['FairMOT']
+
+
+@register
+class FairMOT(BaseArch):
+ """
+ FairMOT network, see http://arxiv.org/abs/2004.01888
+
+ Args:
+ detector (object): 'CenterNet' instance
+ reid (object): 'FairMOTEmbeddingHead' instance
+ tracker (object): 'JDETracker' instance
+ loss (object): 'FairMOTLoss' instance
+
+ """
+
+ __category__ = 'architecture'
+ __inject__ = ['loss']
+
+ def __init__(self,
+ detector='CenterNet',
+ reid='FairMOTEmbeddingHead',
+ tracker='JDETracker',
+ loss='FairMOTLoss'):
+ super(FairMOT, self).__init__()
+ self.detector = detector
+ self.reid = reid
+ self.tracker = tracker
+ self.loss = loss
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ detector = create(cfg['detector'])
+ detector_out_shape = detector.neck and detector.neck.out_shape or detector.backbone.out_shape
+
+ kwargs = {'input_shape': detector_out_shape}
+ reid = create(cfg['reid'], **kwargs)
+ loss = create(cfg['loss'])
+ tracker = create(cfg['tracker'])
+
+ return {
+ 'detector': detector,
+ 'reid': reid,
+ 'loss': loss,
+ 'tracker': tracker
+ }
+
+ def _forward(self):
+ loss = dict()
+ # det_outs keys:
+ # train: neck_feat, det_loss, heatmap_loss, size_loss, offset_loss (optional: iou_loss)
+ # eval/infer: neck_feat, bbox, bbox_inds
+ det_outs = self.detector(self.inputs)
+ neck_feat = det_outs['neck_feat']
+ if self.training:
+ reid_loss = self.reid(neck_feat, self.inputs)
+
+ det_loss = det_outs['det_loss']
+ loss = self.loss(det_loss, reid_loss)
+ for k, v in det_outs.items():
+ if 'loss' not in k:
+ continue
+ loss.update({k: v})
+ loss.update({'reid_loss': reid_loss})
+ return loss
+ else:
+ pred_dets, pred_embs = self.reid(
+ neck_feat, self.inputs, det_outs['bbox'], det_outs['bbox_inds'],
+ det_outs['topk_clses'])
+ return pred_dets, pred_embs
+
+ def get_pred(self):
+ output = self._forward()
+ return output
+
+ def get_loss(self):
+ loss = self._forward()
+ return loss
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/faster_rcnn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/faster_rcnn.py
new file mode 100644
index 000000000..26a2672d6
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/faster_rcnn.py
@@ -0,0 +1,106 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['FasterRCNN']
+
+
+@register
+class FasterRCNN(BaseArch):
+ """
+ Faster R-CNN network, see https://arxiv.org/abs/1506.01497
+
+ Args:
+ backbone (object): backbone instance
+ rpn_head (object): `RPNHead` instance
+ bbox_head (object): `BBoxHead` instance
+ bbox_post_process (object): `BBoxPostProcess` instance
+ neck (object): 'FPN' instance
+ """
+ __category__ = 'architecture'
+ __inject__ = ['bbox_post_process']
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ bbox_head,
+ bbox_post_process,
+ neck=None):
+ super(FasterRCNN, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.rpn_head = rpn_head
+ self.bbox_head = bbox_head
+ self.bbox_post_process = bbox_post_process
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = cfg['neck'] and create(cfg['neck'], **kwargs)
+
+ out_shape = neck and neck.out_shape or backbone.out_shape
+ kwargs = {'input_shape': out_shape}
+ rpn_head = create(cfg['rpn_head'], **kwargs)
+ bbox_head = create(cfg['bbox_head'], **kwargs)
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "rpn_head": rpn_head,
+ "bbox_head": bbox_head,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ if self.neck is not None:
+ body_feats = self.neck(body_feats)
+ if self.training:
+ rois, rois_num, rpn_loss = self.rpn_head(body_feats, self.inputs)
+ bbox_loss, _ = self.bbox_head(body_feats, rois, rois_num,
+ self.inputs)
+ return rpn_loss, bbox_loss
+ else:
+ rois, rois_num, _ = self.rpn_head(body_feats, self.inputs)
+ preds, _ = self.bbox_head(body_feats, rois, rois_num, None)
+
+ im_shape = self.inputs['im_shape']
+ scale_factor = self.inputs['scale_factor']
+ bbox, bbox_num = self.bbox_post_process(preds, (rois, rois_num),
+ im_shape, scale_factor)
+
+ # rescale the prediction back to origin image
+ bbox_pred = self.bbox_post_process.get_pred(bbox, bbox_num,
+ im_shape, scale_factor)
+ return bbox_pred, bbox_num
+
+    def get_loss(self):
+ rpn_loss, bbox_loss = self._forward()
+ loss = {}
+ loss.update(rpn_loss)
+ loss.update(bbox_loss)
+ total_loss = paddle.add_n(list(loss.values()))
+ loss.update({'loss': total_loss})
+ return loss
+
+ def get_pred(self):
+ bbox_pred, bbox_num = self._forward()
+ output = {'bbox': bbox_pred, 'bbox_num': bbox_num}
+ return output
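
A hedged usage sketch of how an architecture such as `FasterRCNN` is typically instantiated; `load_config` and `create` live in `ppdet.core.workspace`, and the YAML path below is illustrative rather than a file this diff adds:

```python
from ppdet.core.workspace import load_config, create

cfg = load_config('configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml')
model = create(cfg.architecture)  # dispatches to FasterRCNN.from_config
```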
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/fcos.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/fcos.py
new file mode 100644
index 000000000..8fa5c569b
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/fcos.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['FCOS']
+
+
+@register
+class FCOS(BaseArch):
+ """
+ FCOS network, see https://arxiv.org/abs/1904.01355
+
+ Args:
+ backbone (object): backbone instance
+ neck (object): 'FPN' instance
+ fcos_head (object): 'FCOSHead' instance
+        fcos_post_process (object): 'FCOSPostProcess' instance
+ """
+
+ __category__ = 'architecture'
+ __inject__ = ['fcos_post_process']
+
+ def __init__(self,
+ backbone,
+ neck,
+ fcos_head='FCOSHead',
+ fcos_post_process='FCOSPostProcess'):
+ super(FCOS, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.fcos_head = fcos_head
+ self.fcos_post_process = fcos_post_process
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = create(cfg['neck'], **kwargs)
+
+ kwargs = {'input_shape': neck.out_shape}
+ fcos_head = create(cfg['fcos_head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "fcos_head": fcos_head,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ fpn_feats = self.neck(body_feats)
+ fcos_head_outs = self.fcos_head(fpn_feats, self.training)
+ if not self.training:
+ scale_factor = self.inputs['scale_factor']
+ bboxes = self.fcos_post_process(fcos_head_outs, scale_factor)
+ return bboxes
+ else:
+ return fcos_head_outs
+
+    def get_loss(self):
+ loss = {}
+ tag_labels, tag_bboxes, tag_centerness = [], [], []
+ for i in range(len(self.fcos_head.fpn_stride)):
+ # labels, reg_target, centerness
+ k_lbl = 'labels{}'.format(i)
+ if k_lbl in self.inputs:
+ tag_labels.append(self.inputs[k_lbl])
+ k_box = 'reg_target{}'.format(i)
+ if k_box in self.inputs:
+ tag_bboxes.append(self.inputs[k_box])
+ k_ctn = 'centerness{}'.format(i)
+ if k_ctn in self.inputs:
+ tag_centerness.append(self.inputs[k_ctn])
+
+ fcos_head_outs = self._forward()
+ loss_fcos = self.fcos_head.get_loss(fcos_head_outs, tag_labels,
+ tag_bboxes, tag_centerness)
+ loss.update(loss_fcos)
+ total_loss = paddle.add_n(list(loss.values()))
+ loss.update({'loss': total_loss})
+ return loss
+
+ def get_pred(self):
+ bbox_pred, bbox_num = self._forward()
+ output = {'bbox': bbox_pred, 'bbox_num': bbox_num}
+ return output
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/gfl.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/gfl.py
new file mode 100644
index 000000000..91c13077f
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/gfl.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['GFL']
+
+
+@register
+class GFL(BaseArch):
+ """
+ Generalized Focal Loss network, see https://arxiv.org/abs/2006.04388
+
+ Args:
+ backbone (object): backbone instance
+ neck (object): 'FPN' instance
+ head (object): 'GFLHead' instance
+ """
+
+ __category__ = 'architecture'
+
+ def __init__(self, backbone, neck, head='GFLHead'):
+ super(GFL, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.head = head
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = create(cfg['neck'], **kwargs)
+
+ kwargs = {'input_shape': neck.out_shape}
+ head = create(cfg['head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "head": head,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ fpn_feats = self.neck(body_feats)
+ head_outs = self.head(fpn_feats)
+ if not self.training:
+ im_shape = self.inputs['im_shape']
+ scale_factor = self.inputs['scale_factor']
+ bboxes, bbox_num = self.head.post_process(head_outs, im_shape,
+ scale_factor)
+ return bboxes, bbox_num
+ else:
+ return head_outs
+
+    def get_loss(self):
+ loss = {}
+
+ head_outs = self._forward()
+ loss_gfl = self.head.get_loss(head_outs, self.inputs)
+ loss.update(loss_gfl)
+ total_loss = paddle.add_n(list(loss.values()))
+ loss.update({'loss': total_loss})
+ return loss
+
+ def get_pred(self):
+ bbox_pred, bbox_num = self._forward()
+ output = {'bbox': bbox_pred, 'bbox_num': bbox_num}
+ return output
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/jde.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/jde.py
new file mode 100644
index 000000000..11b45c8c1
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/jde.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['JDE']
+
+
+@register
+class JDE(BaseArch):
+    """
+    JDE network, see https://arxiv.org/abs/1909.12605v1
+
+    Args:
+        detector (object): detector model instance
+        reid (object): reid model instance
+        tracker (object): tracker instance
+        metric (str): 'MOTDet' for training and detection evaluation, 'ReID'
+            for ReID embedding evaluation, or 'MOT' for multi object tracking
+            evaluation.
+    """
+    __category__ = 'architecture'
+    __shared__ = ['metric']
+
+ def __init__(self,
+ detector='YOLOv3',
+ reid='JDEEmbeddingHead',
+ tracker='JDETracker',
+ metric='MOT'):
+ super(JDE, self).__init__()
+ self.detector = detector
+ self.reid = reid
+ self.tracker = tracker
+ self.metric = metric
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ detector = create(cfg['detector'])
+ kwargs = {'input_shape': detector.neck.out_shape}
+
+ reid = create(cfg['reid'], **kwargs)
+
+ tracker = create(cfg['tracker'])
+
+ return {
+ "detector": detector,
+ "reid": reid,
+ "tracker": tracker,
+ }
+
+ def _forward(self):
+ det_outs = self.detector(self.inputs)
+
+ if self.training:
+ emb_feats = det_outs['emb_feats']
+ loss_confs = det_outs['det_losses']['loss_confs']
+ loss_boxes = det_outs['det_losses']['loss_boxes']
+ jde_losses = self.reid(
+ emb_feats,
+ self.inputs,
+ loss_confs=loss_confs,
+ loss_boxes=loss_boxes)
+ return jde_losses
+ else:
+ if self.metric == 'MOTDet':
+ det_results = {
+ 'bbox': det_outs['bbox'],
+ 'bbox_num': det_outs['bbox_num'],
+ }
+ return det_results
+
+ elif self.metric == 'MOT':
+ emb_feats = det_outs['emb_feats']
+ bboxes = det_outs['bbox']
+ boxes_idx = det_outs['boxes_idx']
+ nms_keep_idx = det_outs['nms_keep_idx']
+
+ pred_dets, pred_embs = self.reid(
+ emb_feats,
+ self.inputs,
+ bboxes=bboxes,
+ boxes_idx=boxes_idx,
+ nms_keep_idx=nms_keep_idx)
+ return pred_dets, pred_embs
+
+ else:
+ raise ValueError("Unknown metric {} for multi object tracking.".
+ format(self.metric))
+
+ def get_loss(self):
+ return self._forward()
+
+ def get_pred(self):
+ return self._forward()
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/keypoint_hrhrnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/keypoint_hrhrnet.py
new file mode 100644
index 000000000..6f62b4b21
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/keypoint_hrhrnet.py
@@ -0,0 +1,296 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from scipy.optimize import linear_sum_assignment
+from collections import abc, defaultdict
+import numpy as np
+import paddle
+
+from ppdet.core.workspace import register, create, serializable
+from .meta_arch import BaseArch
+from .. import layers as L
+from ..keypoint_utils import transpred
+
+__all__ = ['HigherHRNet']
+
+
+@register
+class HigherHRNet(BaseArch):
+ __category__ = 'architecture'
+
+ def __init__(self,
+ backbone='HRNet',
+ hrhrnet_head='HrHRNetHead',
+ post_process='HrHRNetPostProcess',
+ eval_flip=True,
+ flip_perm=None,
+ max_num_people=30):
+ """
+ HigherHRNet network, see https://arxiv.org/abs/1908.10357;
+ HigherHRNet+swahr, see https://arxiv.org/abs/2012.15175
+
+        Args:
+            backbone (nn.Layer): backbone instance
+            hrhrnet_head (nn.Layer): keypoint head instance
+            post_process (object): `HrHRNetPostProcess` instance
+            eval_flip (bool): whether to average flipped heatmaps at eval
+            flip_perm (list): joint index permutation for the flipped image
+            max_num_people (int): max number of people kept per image
+        """
+ super(HigherHRNet, self).__init__()
+ self.backbone = backbone
+ self.hrhrnet_head = hrhrnet_head
+ self.post_process = post_process
+ self.flip = eval_flip
+        self.flip_perm = flip_perm if flip_perm is None else paddle.to_tensor(flip_perm)
+ self.deploy = False
+ self.interpolate = L.Upsample(2, mode='bilinear')
+ self.pool = L.MaxPool(5, 1, 2)
+ self.max_num_people = max_num_people
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ # backbone
+ backbone = create(cfg['backbone'])
+ # head
+ kwargs = {'input_shape': backbone.out_shape}
+ hrhrnet_head = create(cfg['hrhrnet_head'], **kwargs)
+ post_process = create(cfg['post_process'])
+
+ return {
+ 'backbone': backbone,
+ "hrhrnet_head": hrhrnet_head,
+ "post_process": post_process,
+ }
+
+ def _forward(self):
+ if self.flip and not self.training and not self.deploy:
+ self.inputs['image'] = paddle.concat(
+ (self.inputs['image'], paddle.flip(self.inputs['image'], [3])))
+ body_feats = self.backbone(self.inputs)
+
+ if self.training:
+ return self.hrhrnet_head(body_feats, self.inputs)
+ else:
+ outputs = self.hrhrnet_head(body_feats)
+
+ if self.flip and not self.deploy:
+ outputs = [paddle.split(o, 2) for o in outputs]
+ output_rflip = [
+ paddle.flip(paddle.gather(o[1], self.flip_perm, 1), [3])
+ for o in outputs
+ ]
+ output1 = [o[0] for o in outputs]
+ heatmap = (output1[0] + output_rflip[0]) / 2.
+ tagmaps = [output1[1], output_rflip[1]]
+ outputs = [heatmap] + tagmaps
+ outputs = self.get_topk(outputs)
+
+ if self.deploy:
+ return outputs
+
+ res_lst = []
+ h = self.inputs['im_shape'][0, 0].numpy().item()
+ w = self.inputs['im_shape'][0, 1].numpy().item()
+ kpts, scores = self.post_process(*outputs, h, w)
+ res_lst.append([kpts, scores])
+ return res_lst
+
+ def get_loss(self):
+ return self._forward()
+
+ def get_pred(self):
+ outputs = {}
+ res_lst = self._forward()
+ outputs['keypoint'] = res_lst
+ return outputs
+
+ def get_topk(self, outputs):
+ # resize to image size
+ outputs = [self.interpolate(x) for x in outputs]
+ if len(outputs) == 3:
+ tagmap = paddle.concat(
+ (outputs[1].unsqueeze(4), outputs[2].unsqueeze(4)), axis=4)
+ else:
+ tagmap = outputs[1].unsqueeze(4)
+
+ heatmap = outputs[0]
+ N, J = 1, self.hrhrnet_head.num_joints
+ heatmap_maxpool = self.pool(heatmap)
+ # topk
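+        # keep only local maxima: a pixel survives iff it equals its own
+        # 5x5 max-pool response (a cheap NMS before the per-joint top-k)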
+ maxmap = heatmap * (heatmap == heatmap_maxpool)
+ maxmap = maxmap.reshape([N, J, -1])
+ heat_k, inds_k = maxmap.topk(self.max_num_people, axis=2)
+
+ outputs = [heatmap, tagmap, heat_k, inds_k]
+ return outputs
+
+
+@register
+@serializable
+class HrHRNetPostProcess(object):
+    """
+    HrHRNet postprocess contains:
+    1) get the topk keypoints in the output heatmap
+    2) sample the tagmap's value at each of the topk coordinates
+    3) match joints into people with the Hungarian algorithm
+    4) adjust each coordinate by +-0.25 to reduce the error std
+    5) salvage missing joints by checking positivity of heatmap - tagdiff_norm
+    Args:
+        max_num_people (int): max number of people supported in postprocess
+        heat_thresh (float): topk values below this threshold are ignored
+        tag_thresh (float): tag distance below this threshold merges a joint
+            into an existing person cluster
+        inputs (list[Tensor]): model outputs [heatmap, tagmap, heat_k, inds_k]
+        original_height, original_width (float): the original image size
+    """
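+    # Step 3 relies on scipy's linear_sum_assignment (Hungarian method):
+    # e.g. linear_sum_assignment([[4, 1], [2, 3]]) returns rows [0, 1] and
+    # cols [1, 0], i.e. candidate 0 -> cluster 1 and candidate 1 ->
+    # cluster 0, minimising the total cost 1 + 2 = 3.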
+
+ def __init__(self, max_num_people=30, heat_thresh=0.1, tag_thresh=1.):
+ self.max_num_people = max_num_people
+ self.heat_thresh = heat_thresh
+ self.tag_thresh = tag_thresh
+
+ def lerp(self, j, y, x, heatmap):
+ H, W = heatmap.shape[-2:]
+ left = np.clip(x - 1, 0, W - 1)
+ right = np.clip(x + 1, 0, W - 1)
+ up = np.clip(y - 1, 0, H - 1)
+ down = np.clip(y + 1, 0, H - 1)
+ offset_y = np.where(heatmap[j, down, x] > heatmap[j, up, x], 0.25,
+ -0.25)
+ offset_x = np.where(heatmap[j, y, right] > heatmap[j, y, left], 0.25,
+ -0.25)
+ return offset_y + 0.5, offset_x + 0.5
+
+ def __call__(self, heatmap, tagmap, heat_k, inds_k, original_height,
+ original_width):
+
+ N, J, H, W = heatmap.shape
+ assert N == 1, "only support batch size 1"
+ heatmap = heatmap[0].cpu().detach().numpy()
+ tagmap = tagmap[0].cpu().detach().numpy()
+ heats = heat_k[0].cpu().detach().numpy()
+ inds_np = inds_k[0].cpu().detach().numpy()
+ y = inds_np // W
+ x = inds_np % W
+ tags = tagmap[np.arange(J)[None, :].repeat(self.max_num_people),
+ y.flatten(), x.flatten()].reshape(J, -1, tagmap.shape[-1])
+ coords = np.stack((y, x), axis=2)
+ # threshold
+ mask = heats > self.heat_thresh
+ # cluster
+ cluster = defaultdict(lambda: {
+ 'coords': np.zeros((J, 2), dtype=np.float32),
+ 'scores': np.zeros(J, dtype=np.float32),
+ 'tags': []
+ })
+ for jid, m in enumerate(mask):
+ num_valid = m.sum()
+ if num_valid == 0:
+ continue
+ valid_inds = np.where(m)[0]
+ valid_tags = tags[jid, m, :]
+ if len(cluster) == 0: # initialize
+ for i in valid_inds:
+ tag = tags[jid, i]
+ key = tag[0]
+ cluster[key]['tags'].append(tag)
+ cluster[key]['scores'][jid] = heats[jid, i]
+ cluster[key]['coords'][jid] = coords[jid, i]
+ continue
+ candidates = list(cluster.keys())[:self.max_num_people]
+ centroids = [
+ np.mean(
+ cluster[k]['tags'], axis=0) for k in candidates
+ ]
+ num_clusters = len(centroids)
+ # shape is (num_valid, num_clusters, tag_dim)
+ dist = valid_tags[:, None, :] - np.array(centroids)[None, ...]
+ l2_dist = np.linalg.norm(dist, ord=2, axis=2)
+ # modulate dist with heat value, see `use_detection_val`
+ cost = np.round(l2_dist) * 100 - heats[jid, m, None]
+            # pad the cost matrix, otherwise new poses are ignored
+ if num_valid > num_clusters:
+ cost = np.pad(cost, ((0, 0), (0, num_valid - num_clusters)),
+ 'constant',
+ constant_values=((0, 0), (0, 1e-10)))
+ rows, cols = linear_sum_assignment(cost)
+ for y, x in zip(rows, cols):
+ tag = tags[jid, y]
+ if y < num_valid and x < num_clusters and \
+ l2_dist[y, x] < self.tag_thresh:
+ key = candidates[x] # merge to cluster
+ else:
+ key = tag[0] # initialize new cluster
+ cluster[key]['tags'].append(tag)
+ cluster[key]['scores'][jid] = heats[jid, y]
+ cluster[key]['coords'][jid] = coords[jid, y]
+
+ # shape is [k, J, 2] and [k, J]
+ pose_tags = np.array([cluster[k]['tags'] for k in cluster])
+ pose_coords = np.array([cluster[k]['coords'] for k in cluster])
+ pose_scores = np.array([cluster[k]['scores'] for k in cluster])
+ valid = pose_scores > 0
+
+ pose_kpts = np.zeros((pose_scores.shape[0], J, 3), dtype=np.float32)
+ if valid.sum() == 0:
+ return pose_kpts, pose_kpts
+
+ # refine coords
+ valid_coords = pose_coords[valid].astype(np.int32)
+ y = valid_coords[..., 0].flatten()
+ x = valid_coords[..., 1].flatten()
+ _, j = np.nonzero(valid)
+ offsets = self.lerp(j, y, x, heatmap)
+ pose_coords[valid, 0] += offsets[0]
+ pose_coords[valid, 1] += offsets[1]
+
+ # mean score before salvage
+ mean_score = pose_scores.mean(axis=1)
+ pose_kpts[valid, 2] = pose_scores[valid]
+
+        # salvage missing joints
+        for pid, coords in enumerate(pose_coords):
+            tag_mean = np.array(pose_tags[pid]).mean(axis=0)
+            norm = np.sum((tagmap - tag_mean)**2, axis=3)**0.5
+            score = heatmap - np.round(norm)  # (J, H, W)
+            flat_score = score.reshape(J, -1)
+            max_inds = np.argmax(flat_score, axis=1)
+            max_scores = np.max(flat_score, axis=1)
+            salvage_joints = (pose_scores[pid] == 0) & (max_scores > 0)
+            if salvage_joints.sum() == 0:
+                continue
+            y = max_inds[salvage_joints] // W
+            x = max_inds[salvage_joints] % W
+            offsets = self.lerp(salvage_joints.nonzero()[0], y, x, heatmap)
+            y = y.astype(np.float32) + offsets[0]
+            x = x.astype(np.float32) + offsets[1]
+            pose_coords[pid][salvage_joints, 0] = y
+            pose_coords[pid][salvage_joints, 1] = x
+            pose_kpts[pid][salvage_joints, 2] = max_scores[salvage_joints]
+ pose_kpts[..., :2] = transpred(pose_coords[..., :2][..., ::-1],
+ original_height, original_width,
+ min(H, W))
+ return pose_kpts, mean_score
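
As a side note on step 3 of the docstring above: below is a minimal, stand-alone sketch (not the ppdet code path, with made-up tag values) of how candidate joints are merged into people by tag distance via `scipy.optimize.linear_sum_assignment`, with the cost matrix padded so surplus joints can seed new people:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

tag_thresh = 1.0
# two existing people (cluster centroids) and three candidate joints,
# each described by a 1-d associative-embedding tag (values are invented)
centroids = np.array([[0.1], [2.0]])           # (num_clusters, tag_dim)
valid_tags = np.array([[0.05], [1.9], [5.0]])  # (num_valid, tag_dim)

# pairwise L2 distance, shape (num_valid, num_clusters)
l2_dist = np.linalg.norm(valid_tags[:, None, :] - centroids[None, :, :],
                         axis=2)
num_valid, num_clusters = l2_dist.shape
# pad with cheap columns so surplus joints start new people instead of
# being forced onto a bad match (mirrors the np.pad call above)
cost = np.pad(l2_dist, ((0, 0), (0, num_valid - num_clusters)),
              constant_values=0.)

rows, cols = linear_sum_assignment(cost)
for r, c in zip(rows, cols):
    if c < num_clusters and l2_dist[r, c] < tag_thresh:
        print(f"joint {r} -> person {c}")   # merged into existing cluster
    else:
        print(f"joint {r} -> new person")   # seeds a new cluster
```

The outlier tag (5.0) lands on a padded column and starts a new person, which is exactly why the cost matrix is padded in `__call__` above.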
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/keypoint_hrnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/keypoint_hrnet.py
new file mode 100644
index 000000000..914bd043c
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/keypoint_hrnet.py
@@ -0,0 +1,267 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import numpy as np
+import math
+import cv2
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+from ..keypoint_utils import transform_preds
+from .. import layers as L
+
+__all__ = ['TopDownHRNet']
+
+
+@register
+class TopDownHRNet(BaseArch):
+ __category__ = 'architecture'
+ __inject__ = ['loss']
+
+ def __init__(self,
+ width,
+ num_joints,
+ backbone='HRNet',
+ loss='KeyPointMSELoss',
+ post_process='HRNetPostProcess',
+ flip_perm=None,
+ flip=True,
+ shift_heatmap=True,
+ use_dark=True):
+ """
+ HRNet network, see https://arxiv.org/abs/1902.09212
+
+ Args:
+ backbone (nn.Layer): backbone instance
+ post_process (object): `HRNetPostProcess` instance
+ flip_perm (list): The left-right joints exchange order list
+            use_dark (bool): Whether to use DARK in post processing
+ """
+ super(TopDownHRNet, self).__init__()
+ self.backbone = backbone
+ self.post_process = HRNetPostProcess(use_dark)
+ self.loss = loss
+ self.flip_perm = flip_perm
+ self.flip = flip
+ self.final_conv = L.Conv2d(width, num_joints, 1, 1, 0, bias=True)
+ self.shift_heatmap = shift_heatmap
+ self.deploy = False
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ # backbone
+ backbone = create(cfg['backbone'])
+
+ return {'backbone': backbone, }
+
+ def _forward(self):
+ feats = self.backbone(self.inputs)
+ hrnet_outputs = self.final_conv(feats[0])
+
+ if self.training:
+ return self.loss(hrnet_outputs, self.inputs)
+ elif self.deploy:
+ outshape = hrnet_outputs.shape
+ max_idx = paddle.argmax(
+ hrnet_outputs.reshape(
+ (outshape[0], outshape[1], outshape[2] * outshape[3])),
+ axis=-1)
+ return hrnet_outputs, max_idx
+ else:
+ if self.flip:
+ self.inputs['image'] = self.inputs['image'].flip([3])
+ feats = self.backbone(self.inputs)
+ output_flipped = self.final_conv(feats[0])
+ output_flipped = self.flip_back(output_flipped.numpy(),
+ self.flip_perm)
+ output_flipped = paddle.to_tensor(output_flipped.copy())
+ if self.shift_heatmap:
+ output_flipped[:, :, :, 1:] = output_flipped.clone(
+ )[:, :, :, 0:-1]
+ hrnet_outputs = (hrnet_outputs + output_flipped) * 0.5
+ imshape = (self.inputs['im_shape'].numpy()
+ )[:, ::-1] if 'im_shape' in self.inputs else None
+ center = self.inputs['center'].numpy(
+ ) if 'center' in self.inputs else np.round(imshape / 2.)
+ scale = self.inputs['scale'].numpy(
+ ) if 'scale' in self.inputs else imshape / 200.
+ outputs = self.post_process(hrnet_outputs, center, scale)
+ return outputs
+
+ def get_loss(self):
+ return self._forward()
+
+ def get_pred(self):
+ res_lst = self._forward()
+ outputs = {'keypoint': res_lst}
+ return outputs
+
+ def flip_back(self, output_flipped, matched_parts):
+ assert output_flipped.ndim == 4,\
+ 'output_flipped should be [batch_size, num_joints, height, width]'
+
+ output_flipped = output_flipped[:, :, :, ::-1]
+
+ for pair in matched_parts:
+ tmp = output_flipped[:, pair[0], :, :].copy()
+ output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :]
+ output_flipped[:, pair[1], :, :] = tmp
+
+ return output_flipped
+
+
+class HRNetPostProcess(object):
+ def __init__(self, use_dark=True):
+ self.use_dark = use_dark
+
+ def get_max_preds(self, heatmaps):
+        '''Get predictions from score maps.
+
+        Args:
+            heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
+
+        Returns:
+            preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords
+            maxvals: numpy.ndarray([batch_size, num_joints, 1]), the maximum confidence of the keypoints
+        '''
+ assert isinstance(heatmaps,
+ np.ndarray), 'heatmaps should be numpy.ndarray'
+ assert heatmaps.ndim == 4, 'batch_images should be 4-ndim'
+
+ batch_size = heatmaps.shape[0]
+ num_joints = heatmaps.shape[1]
+ width = heatmaps.shape[3]
+ heatmaps_reshaped = heatmaps.reshape((batch_size, num_joints, -1))
+ idx = np.argmax(heatmaps_reshaped, 2)
+ maxvals = np.amax(heatmaps_reshaped, 2)
+
+ maxvals = maxvals.reshape((batch_size, num_joints, 1))
+ idx = idx.reshape((batch_size, num_joints, 1))
+
+ preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
+
+ preds[:, :, 0] = (preds[:, :, 0]) % width
+ preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)
+
+ pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
+ pred_mask = pred_mask.astype(np.float32)
+
+ preds *= pred_mask
+
+ return preds, maxvals
+
+ def gaussian_blur(self, heatmap, kernel):
+ border = (kernel - 1) // 2
+ batch_size = heatmap.shape[0]
+ num_joints = heatmap.shape[1]
+ height = heatmap.shape[2]
+ width = heatmap.shape[3]
+ for i in range(batch_size):
+ for j in range(num_joints):
+ origin_max = np.max(heatmap[i, j])
+ dr = np.zeros((height + 2 * border, width + 2 * border))
+ dr[border:-border, border:-border] = heatmap[i, j].copy()
+ dr = cv2.GaussianBlur(dr, (kernel, kernel), 0)
+ heatmap[i, j] = dr[border:-border, border:-border].copy()
+ heatmap[i, j] *= origin_max / np.max(heatmap[i, j])
+ return heatmap
+
+ def dark_parse(self, hm, coord):
+ heatmap_height = hm.shape[0]
+ heatmap_width = hm.shape[1]
+ px = int(coord[0])
+ py = int(coord[1])
+ if 1 < px < heatmap_width - 2 and 1 < py < heatmap_height - 2:
+ dx = 0.5 * (hm[py][px + 1] - hm[py][px - 1])
+ dy = 0.5 * (hm[py + 1][px] - hm[py - 1][px])
+ dxx = 0.25 * (hm[py][px + 2] - 2 * hm[py][px] + hm[py][px - 2])
+ dxy = 0.25 * (hm[py+1][px+1] - hm[py-1][px+1] - hm[py+1][px-1] \
+ + hm[py-1][px-1])
+            dyy = 0.25 * (
+                hm[py + 2][px] - 2 * hm[py][px] + hm[py - 2][px])
+ derivative = np.matrix([[dx], [dy]])
+ hessian = np.matrix([[dxx, dxy], [dxy, dyy]])
+ if dxx * dyy - dxy**2 != 0:
+ hessianinv = hessian.I
+ offset = -hessianinv * derivative
+ offset = np.squeeze(np.array(offset.T), axis=0)
+ coord += offset
+ return coord
+
+ def dark_postprocess(self, hm, coords, kernelsize):
+        '''DARK postprocessing, Zhang et al. Distribution-Aware Coordinate
+ Representation for Human Pose Estimation (CVPR 2020).
+ '''
+
+ hm = self.gaussian_blur(hm, kernelsize)
+ hm = np.maximum(hm, 1e-10)
+ hm = np.log(hm)
+ for n in range(coords.shape[0]):
+ for p in range(coords.shape[1]):
+ coords[n, p] = self.dark_parse(hm[n][p], coords[n][p])
+ return coords
+
+ def get_final_preds(self, heatmaps, center, scale, kernelsize=3):
+        """Get the final predictions: the highest heat-value location, shifted
+        by a quarter offset in the direction from the highest response to the
+        second highest response.
+
+ Args:
+ heatmaps (numpy.ndarray): The predicted heatmaps
+ center (numpy.ndarray): The boxes center
+ scale (numpy.ndarray): The scale factor
+
+ Returns:
+ preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords
+ maxvals: numpy.ndarray([batch_size, num_joints, 1]), the maximum confidence of the keypoints
+ """
+ coords, maxvals = self.get_max_preds(heatmaps)
+
+ heatmap_height = heatmaps.shape[2]
+ heatmap_width = heatmaps.shape[3]
+
+ if self.use_dark:
+ coords = self.dark_postprocess(heatmaps, coords, kernelsize)
+ else:
+ for n in range(coords.shape[0]):
+ for p in range(coords.shape[1]):
+ hm = heatmaps[n][p]
+ px = int(math.floor(coords[n][p][0] + 0.5))
+ py = int(math.floor(coords[n][p][1] + 0.5))
+ if 1 < px < heatmap_width - 1 and 1 < py < heatmap_height - 1:
+ diff = np.array([
+ hm[py][px + 1] - hm[py][px - 1],
+ hm[py + 1][px] - hm[py - 1][px]
+ ])
+ coords[n][p] += np.sign(diff) * .25
+ preds = coords.copy()
+
+ # Transform back
+ for i in range(coords.shape[0]):
+ preds[i] = transform_preds(coords[i], center[i], scale[i],
+ [heatmap_width, heatmap_height])
+
+ return preds, maxvals
+
+ def __call__(self, output, center, scale):
+ preds, maxvals = self.get_final_preds(output.numpy(), center, scale)
+ outputs = [[
+ np.concatenate(
+ (preds, maxvals), axis=-1), np.mean(
+ maxvals, axis=1)
+ ]]
+ return outputs
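
For intuition, here is a minimal numpy sketch of the eval-time decoding above when `use_dark=False`: take the heatmap argmax as `get_max_preds` does, then shift a quarter pixel toward the stronger neighbouring response. The 5x5 heatmap is fabricated for illustration:

```python
import numpy as np

heatmap = np.zeros((1, 1, 5, 5), dtype=np.float32)  # (batch, joints, H, W)
heatmap[0, 0, 2, 3] = 1.0   # peak
heatmap[0, 0, 2, 4] = 0.4   # second response, pulls the estimate right

# argmax decoding, as in get_max_preds
flat = heatmap.reshape(1, 1, -1)
idx = np.argmax(flat, axis=2)
x = (idx % 5).astype(np.float32)
y = np.floor(idx / 5).astype(np.float32)

# quarter-pixel shift toward the larger neighbour (the non-DARK branch)
hm = heatmap[0, 0]
px, py = int(x[0, 0]), int(y[0, 0])
diff = np.array([hm[py, px + 1] - hm[py, px - 1],
                 hm[py + 1, px] - hm[py - 1, px]])
coord = np.array([x[0, 0], y[0, 0]]) + np.sign(diff) * 0.25
print(coord)  # [3.25 2.  ]: shifted a quarter pixel toward the 0.4 cell
```

The DARK branch replaces this fixed quarter shift with a Newton step computed from the log-heatmap's gradient and Hessian, as in `dark_parse` above.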
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/mask_rcnn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/mask_rcnn.py
new file mode 100644
index 000000000..071a326f4
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/mask_rcnn.py
@@ -0,0 +1,135 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['MaskRCNN']
+
+
+@register
+class MaskRCNN(BaseArch):
+ """
+ Mask R-CNN network, see https://arxiv.org/abs/1703.06870
+
+ Args:
+ backbone (object): backbone instance
+ rpn_head (object): `RPNHead` instance
+ bbox_head (object): `BBoxHead` instance
+ mask_head (object): `MaskHead` instance
+ bbox_post_process (object): `BBoxPostProcess` instance
+ mask_post_process (object): `MaskPostProcess` instance
+ neck (object): 'FPN' instance
+ """
+
+ __category__ = 'architecture'
+ __inject__ = [
+ 'bbox_post_process',
+ 'mask_post_process',
+ ]
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ bbox_head,
+ mask_head,
+ bbox_post_process,
+ mask_post_process,
+ neck=None):
+ super(MaskRCNN, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.rpn_head = rpn_head
+ self.bbox_head = bbox_head
+ self.mask_head = mask_head
+
+ self.bbox_post_process = bbox_post_process
+ self.mask_post_process = mask_post_process
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = cfg['neck'] and create(cfg['neck'], **kwargs)
+
+ out_shape = neck and neck.out_shape or backbone.out_shape
+ kwargs = {'input_shape': out_shape}
+ rpn_head = create(cfg['rpn_head'], **kwargs)
+ bbox_head = create(cfg['bbox_head'], **kwargs)
+
+ out_shape = neck and out_shape or bbox_head.get_head().out_shape
+ kwargs = {'input_shape': out_shape}
+ mask_head = create(cfg['mask_head'], **kwargs)
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "rpn_head": rpn_head,
+ "bbox_head": bbox_head,
+ "mask_head": mask_head,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ if self.neck is not None:
+ body_feats = self.neck(body_feats)
+
+ if self.training:
+ rois, rois_num, rpn_loss = self.rpn_head(body_feats, self.inputs)
+ bbox_loss, bbox_feat = self.bbox_head(body_feats, rois, rois_num,
+ self.inputs)
+ rois, rois_num = self.bbox_head.get_assigned_rois()
+ bbox_targets = self.bbox_head.get_assigned_targets()
+ # Mask Head needs bbox_feat in Mask RCNN
+ mask_loss = self.mask_head(body_feats, rois, rois_num, self.inputs,
+ bbox_targets, bbox_feat)
+ return rpn_loss, bbox_loss, mask_loss
+ else:
+ rois, rois_num, _ = self.rpn_head(body_feats, self.inputs)
+ preds, feat_func = self.bbox_head(body_feats, rois, rois_num, None)
+
+ im_shape = self.inputs['im_shape']
+ scale_factor = self.inputs['scale_factor']
+
+ bbox, bbox_num = self.bbox_post_process(preds, (rois, rois_num),
+ im_shape, scale_factor)
+ mask_out = self.mask_head(
+ body_feats, bbox, bbox_num, self.inputs, feat_func=feat_func)
+
+ # rescale the prediction back to origin image
+ bbox_pred = self.bbox_post_process.get_pred(bbox, bbox_num,
+ im_shape, scale_factor)
+ origin_shape = self.bbox_post_process.get_origin_shape()
+ mask_pred = self.mask_post_process(mask_out[:, 0, :, :], bbox_pred,
+ bbox_num, origin_shape)
+ return bbox_pred, bbox_num, mask_pred
+
+ def get_loss(self, ):
+        rpn_loss, bbox_loss, mask_loss = self._forward()
+ loss = {}
+ loss.update(rpn_loss)
+ loss.update(bbox_loss)
+ loss.update(mask_loss)
+ total_loss = paddle.add_n(list(loss.values()))
+ loss.update({'loss': total_loss})
+ return loss
+
+ def get_pred(self):
+ bbox_pred, bbox_num, mask_pred = self._forward()
+ output = {'bbox': bbox_pred, 'bbox_num': bbox_num, 'mask': mask_pred}
+ return output
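
A small illustration (with made-up loss values) of the `get_loss` convention used here and in the other architectures of this patch: the per-head loss dicts are merged, then folded into a single scalar with `paddle.add_n`:

```python
import paddle

loss = {
    'loss_rpn_cls': paddle.to_tensor(0.3),
    'loss_bbox_cls': paddle.to_tensor(0.7),
    'loss_mask': paddle.to_tensor(0.2),
}
# sum all component losses into the scalar used for backward()
loss['loss'] = paddle.add_n(list(loss.values()))
print(float(loss['loss']))  # 1.2
```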
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/meta_arch.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/meta_arch.py
new file mode 100644
index 000000000..d9875e183
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/meta_arch.py
@@ -0,0 +1,72 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+from ppdet.core.workspace import register
+
+__all__ = ['BaseArch']
+
+
+@register
+class BaseArch(nn.Layer):
+ def __init__(self, data_format='NCHW'):
+ super(BaseArch, self).__init__()
+ self.data_format = data_format
+ self.inputs = {}
+ self.fuse_norm = False
+
+ def load_meanstd(self, cfg_transform):
+ self.scale = 1.
+ self.mean = paddle.to_tensor([0.485, 0.456, 0.406]).reshape(
+ (1, 3, 1, 1))
+ self.std = paddle.to_tensor([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))
+ for item in cfg_transform:
+ if 'NormalizeImage' in item:
+ self.mean = paddle.to_tensor(item['NormalizeImage'][
+ 'mean']).reshape((1, 3, 1, 1))
+ self.std = paddle.to_tensor(item['NormalizeImage'][
+ 'std']).reshape((1, 3, 1, 1))
+ if item['NormalizeImage'].get('is_scale', True):
+ self.scale = 1. / 255.
+ break
+ if self.data_format == 'NHWC':
+ self.mean = self.mean.reshape(1, 1, 1, 3)
+ self.std = self.std.reshape(1, 1, 1, 3)
+
+ def forward(self, inputs):
+ if self.data_format == 'NHWC':
+ image = inputs['image']
+ inputs['image'] = paddle.transpose(image, [0, 2, 3, 1])
+
+ if self.fuse_norm:
+ image = inputs['image']
+ self.inputs['image'] = (image * self.scale - self.mean) / self.std
+ self.inputs['im_shape'] = inputs['im_shape']
+ self.inputs['scale_factor'] = inputs['scale_factor']
+ else:
+ self.inputs = inputs
+
+ self.model_arch()
+
+ if self.training:
+ out = self.get_loss()
+ else:
+ out = self.get_pred()
+ return out
+
+ def build_inputs(self, data, input_def):
+ inputs = {}
+ for i, k in enumerate(input_def):
+ inputs[k] = data[i]
+ return inputs
+
+ def model_arch(self, ):
+ pass
+
+ def get_loss(self, ):
+ raise NotImplementedError("Should implement get_loss method!")
+
+ def get_pred(self, ):
+ raise NotImplementedError("Should implement get_pred method!")
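
To make the BaseArch contract concrete, here is a toy subclass, hypothetical and only for illustration (assuming the ppdet package from this patch is importable): `forward` stores the feed dict in `self.inputs`, then dispatches to `get_loss` in training mode and `get_pred` in eval mode:

```python
import paddle
from ppdet.modeling.architectures.meta_arch import BaseArch  # the class above

class ToyArch(BaseArch):
    def get_loss(self):
        # training branch: return a dict of losses
        return {'loss': self.inputs['image'].mean()}

    def get_pred(self):
        # eval branch: return a dict of predictions
        return {'pred': self.inputs['image'].sum()}

net = ToyArch()
net.eval()
out = net({'image': paddle.rand([1, 3, 8, 8])})  # routes through get_pred()
```

Every architecture in this patch relies on this dispatch, which is why they only implement `_forward`, `get_loss`, and `get_pred`.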
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/picodet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/picodet.py
new file mode 100644
index 000000000..cd807a9fa
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/picodet.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['PicoDet']
+
+
+@register
+class PicoDet(BaseArch):
+ """
+ Generalized Focal Loss network, see https://arxiv.org/abs/2006.04388
+
+ Args:
+ backbone (object): backbone instance
+ neck (object): 'FPN' instance
+ head (object): 'PicoHead' instance
+ """
+
+ __category__ = 'architecture'
+
+ def __init__(self, backbone, neck, head='PicoHead'):
+ super(PicoDet, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.head = head
+ self.deploy = False
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = create(cfg['neck'], **kwargs)
+
+ kwargs = {'input_shape': neck.out_shape}
+ head = create(cfg['head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "head": head,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ fpn_feats = self.neck(body_feats)
+ head_outs = self.head(fpn_feats, self.deploy)
+ if self.training or self.deploy:
+ return head_outs, None
+ else:
+ im_shape = self.inputs['im_shape']
+ scale_factor = self.inputs['scale_factor']
+ bboxes, bbox_num = self.head.post_process(head_outs, im_shape,
+ scale_factor)
+ return bboxes, bbox_num
+
+ def get_loss(self, ):
+ loss = {}
+
+ head_outs, _ = self._forward()
+ loss_gfl = self.head.get_loss(head_outs, self.inputs)
+ loss.update(loss_gfl)
+ total_loss = paddle.add_n(list(loss.values()))
+ loss.update({'loss': total_loss})
+ return loss
+
+ def get_pred(self):
+ if self.deploy:
+ return {'picodet': self._forward()[0]}
+ else:
+ bbox_pred, bbox_num = self._forward()
+ output = {'bbox': bbox_pred, 'bbox_num': bbox_num}
+ return output
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/s2anet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/s2anet.py
new file mode 100644
index 000000000..ecfc987f9
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/s2anet.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['S2ANet']
+
+
+@register
+class S2ANet(BaseArch):
+ __category__ = 'architecture'
+ __inject__ = [
+ 's2anet_head',
+ 's2anet_bbox_post_process',
+ ]
+
+ def __init__(self, backbone, neck, s2anet_head, s2anet_bbox_post_process):
+ """
+ S2ANet, see https://arxiv.org/pdf/2008.09397.pdf
+
+ Args:
+ backbone (object): backbone instance
+ neck (object): `FPN` instance
+ s2anet_head (object): `S2ANetHead` instance
+ s2anet_bbox_post_process (object): `S2ANetBBoxPostProcess` instance
+ """
+ super(S2ANet, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.s2anet_head = s2anet_head
+ self.s2anet_bbox_post_process = s2anet_bbox_post_process
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = cfg['neck'] and create(cfg['neck'], **kwargs)
+
+ out_shape = neck and neck.out_shape or backbone.out_shape
+ kwargs = {'input_shape': out_shape}
+ s2anet_head = create(cfg['s2anet_head'], **kwargs)
+ s2anet_bbox_post_process = create(cfg['s2anet_bbox_post_process'],
+ **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "s2anet_head": s2anet_head,
+ "s2anet_bbox_post_process": s2anet_bbox_post_process,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ if self.neck is not None:
+ body_feats = self.neck(body_feats)
+ self.s2anet_head(body_feats)
+ if self.training:
+ loss = self.s2anet_head.get_loss(self.inputs)
+ total_loss = paddle.add_n(list(loss.values()))
+ loss.update({'loss': total_loss})
+ return loss
+ else:
+ im_shape = self.inputs['im_shape']
+ scale_factor = self.inputs['scale_factor']
+ nms_pre = self.s2anet_bbox_post_process.nms_pre
+ pred_scores, pred_bboxes = self.s2anet_head.get_prediction(nms_pre)
+
+ # post_process
+ pred_bboxes, bbox_num = self.s2anet_bbox_post_process(pred_scores,
+ pred_bboxes)
+ # rescale the prediction back to origin image
+ pred_bboxes = self.s2anet_bbox_post_process.get_pred(
+ pred_bboxes, bbox_num, im_shape, scale_factor)
+
+ # output
+ output = {'bbox': pred_bboxes, 'bbox_num': bbox_num}
+ return output
+
+ def get_loss(self, ):
+ loss = self._forward()
+ return loss
+
+ def get_pred(self):
+ output = self._forward()
+ return output
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/solov2.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/solov2.py
new file mode 100644
index 000000000..4e5fc2118
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/solov2.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['SOLOv2']
+
+
+@register
+class SOLOv2(BaseArch):
+ """
+ SOLOv2 network, see https://arxiv.org/abs/2003.10152
+
+ Args:
+        backbone (object): a backbone instance
+        solov2_head (object): a `SOLOv2Head` instance
+        mask_head (object): a `SOLOv2MaskHead` instance
+        neck (object): the network neck, e.g. a feature pyramid network instance
+ """
+
+ __category__ = 'architecture'
+
+ def __init__(self, backbone, solov2_head, mask_head, neck=None):
+ super(SOLOv2, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.solov2_head = solov2_head
+ self.mask_head = mask_head
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = create(cfg['neck'], **kwargs)
+
+ kwargs = {'input_shape': neck.out_shape}
+ solov2_head = create(cfg['solov2_head'], **kwargs)
+ mask_head = create(cfg['mask_head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ 'solov2_head': solov2_head,
+ 'mask_head': mask_head,
+ }
+
+ def model_arch(self):
+ body_feats = self.backbone(self.inputs)
+
+ body_feats = self.neck(body_feats)
+
+ self.seg_pred = self.mask_head(body_feats)
+
+ self.cate_pred_list, self.kernel_pred_list = self.solov2_head(
+ body_feats)
+
+ def get_loss(self, ):
+ loss = {}
+ # get gt_ins_labels, gt_cate_labels, etc.
+ gt_ins_labels, gt_cate_labels, gt_grid_orders = [], [], []
+ fg_num = self.inputs['fg_num']
+ for i in range(len(self.solov2_head.seg_num_grids)):
+ ins_label = 'ins_label{}'.format(i)
+ if ins_label in self.inputs:
+ gt_ins_labels.append(self.inputs[ins_label])
+ cate_label = 'cate_label{}'.format(i)
+ if cate_label in self.inputs:
+ gt_cate_labels.append(self.inputs[cate_label])
+ grid_order = 'grid_order{}'.format(i)
+ if grid_order in self.inputs:
+ gt_grid_orders.append(self.inputs[grid_order])
+
+ loss_solov2 = self.solov2_head.get_loss(
+ self.cate_pred_list, self.kernel_pred_list, self.seg_pred,
+ gt_ins_labels, gt_cate_labels, gt_grid_orders, fg_num)
+ loss.update(loss_solov2)
+ total_loss = paddle.add_n(list(loss.values()))
+ loss.update({'loss': total_loss})
+ return loss
+
+ def get_pred(self):
+ seg_masks, cate_labels, cate_scores, bbox_num = self.solov2_head.get_prediction(
+ self.cate_pred_list, self.kernel_pred_list, self.seg_pred,
+ self.inputs['im_shape'], self.inputs['scale_factor'])
+ outs = {
+ "segm": seg_masks,
+ "bbox_num": bbox_num,
+ 'cate_label': cate_labels,
+ 'cate_score': cate_scores
+ }
+ return outs
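
A tiny sketch (dummy values) of the gathering convention in `get_loss` above: per-FPN-level targets arrive in the feed dict as flat keys `ins_label0`, `ins_label1`, ... and are collected into lists ordered by level:

```python
# dummy feed dict: two FPN levels of instance labels, one category label
inputs = {'ins_label0': 'L0', 'ins_label1': 'L1', 'cate_label0': 'C0'}
num_levels = 2

gt_ins_labels = [inputs['ins_label{}'.format(i)]
                 for i in range(num_levels)
                 if 'ins_label{}'.format(i) in inputs]
print(gt_ins_labels)  # ['L0', 'L1'], ordered by level
```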
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/sparse_rcnn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/sparse_rcnn.py
new file mode 100644
index 000000000..34c29498b
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/sparse_rcnn.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ["SparseRCNN"]
+
+
+@register
+class SparseRCNN(BaseArch):
+ __category__ = 'architecture'
+ __inject__ = ["postprocess"]
+
+ def __init__(self,
+ backbone,
+ neck,
+ head="SparsercnnHead",
+ postprocess="SparsePostProcess"):
+ super(SparseRCNN, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.head = head
+ self.postprocess = postprocess
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = create(cfg['neck'], **kwargs)
+
+ kwargs = {'roi_input_shape': neck.out_shape}
+ head = create(cfg['head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "head": head,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ fpn_feats = self.neck(body_feats)
+ head_outs = self.head(fpn_feats, self.inputs["img_whwh"])
+
+ if not self.training:
+ bboxes = self.postprocess(
+ head_outs["pred_logits"], head_outs["pred_boxes"],
+ self.inputs["scale_factor_wh"], self.inputs["img_whwh"])
+ return bboxes
+ else:
+ return head_outs
+
+ def get_loss(self):
+ batch_gt_class = self.inputs["gt_class"]
+ batch_gt_box = self.inputs["gt_bbox"]
+ batch_whwh = self.inputs["img_whwh"]
+ targets = []
+
+ for i in range(len(batch_gt_class)):
+ boxes = batch_gt_box[i]
+ labels = batch_gt_class[i].squeeze(-1)
+ img_whwh = batch_whwh[i]
+ img_whwh_tgt = img_whwh.unsqueeze(0).tile([int(boxes.shape[0]), 1])
+ targets.append({
+ "boxes": boxes,
+ "labels": labels,
+ "img_whwh": img_whwh,
+ "img_whwh_tgt": img_whwh_tgt
+ })
+
+ outputs = self._forward()
+ loss_dict = self.head.get_loss(outputs, targets)
+ acc = loss_dict["acc"]
+ loss_dict.pop("acc")
+ total_loss = sum(loss_dict.values())
+ loss_dict.update({"loss": total_loss, "acc": acc})
+ return loss_dict
+
+ def get_pred(self):
+ bbox_pred, bbox_num = self._forward()
+ output = {'bbox': bbox_pred, 'bbox_num': bbox_num}
+ return output
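
A minimal paddle sketch (invented shapes) of the target construction in `get_loss` above: the per-image `[w, h, w, h]` vector is tiled so each gt box gets its own row:

```python
import paddle

boxes = paddle.rand([4, 4])  # 4 gt boxes for one image, xyxy
img_whwh = paddle.to_tensor([640., 480., 640., 480.])
# one [w, h, w, h] row per gt box, as built per image in get_loss
img_whwh_tgt = img_whwh.unsqueeze(0).tile([boxes.shape[0], 1])
print(img_whwh_tgt.shape)  # [4, 4]
```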
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/ssd.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/ssd.py
new file mode 100644
index 000000000..34bf24108
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/ssd.py
@@ -0,0 +1,92 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['SSD']
+
+
+@register
+class SSD(BaseArch):
+ """
+ Single Shot MultiBox Detector, see https://arxiv.org/abs/1512.02325
+
+ Args:
+ backbone (nn.Layer): backbone instance
+ ssd_head (nn.Layer): `SSDHead` instance
+ post_process (object): `BBoxPostProcess` instance
+ """
+
+ __category__ = 'architecture'
+ __inject__ = ['post_process']
+
+ def __init__(self, backbone, ssd_head, post_process, r34_backbone=False):
+ super(SSD, self).__init__()
+ self.backbone = backbone
+ self.ssd_head = ssd_head
+ self.post_process = post_process
+ self.r34_backbone = r34_backbone
+ if self.r34_backbone:
+ from ppdet.modeling.backbones.resnet import ResNet
+ assert isinstance(self.backbone, ResNet) and \
+ self.backbone.depth == 34, \
+ "If you set r34_backbone=True, please use ResNet-34 as backbone."
+ self.backbone.res_layers[2].blocks[0].branch2a.conv._stride = [1, 1]
+ self.backbone.res_layers[2].blocks[0].short.conv._stride = [1, 1]
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ # backbone
+ backbone = create(cfg['backbone'])
+
+ # head
+ kwargs = {'input_shape': backbone.out_shape}
+ ssd_head = create(cfg['ssd_head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ "ssd_head": ssd_head,
+ }
+
+ def _forward(self):
+ # Backbone
+ body_feats = self.backbone(self.inputs)
+
+ # SSD Head
+ if self.training:
+ return self.ssd_head(body_feats, self.inputs['image'],
+ self.inputs['gt_bbox'],
+ self.inputs['gt_class'])
+ else:
+ preds, anchors = self.ssd_head(body_feats, self.inputs['image'])
+ bbox, bbox_num = self.post_process(preds, anchors,
+ self.inputs['im_shape'],
+ self.inputs['scale_factor'])
+ return bbox, bbox_num
+
+ def get_loss(self, ):
+ return {"loss": self._forward()}
+
+ def get_pred(self):
+ bbox_pred, bbox_num = self._forward()
+ output = {
+ "bbox": bbox_pred,
+ "bbox_num": bbox_num,
+ }
+ return output
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/tood.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/tood.py
new file mode 100644
index 000000000..157ec6f3a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/tood.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['TOOD']
+
+
+@register
+class TOOD(BaseArch):
+ """
+ TOOD: Task-aligned One-stage Object Detection, see https://arxiv.org/abs/2108.07755
+ Args:
+ backbone (nn.Layer): backbone instance
+ neck (nn.Layer): 'FPN' instance
+ head (nn.Layer): 'TOODHead' instance
+ """
+
+ __category__ = 'architecture'
+
+ def __init__(self, backbone, neck, head):
+ super(TOOD, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.head = head
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = create(cfg['neck'], **kwargs)
+
+ kwargs = {'input_shape': neck.out_shape}
+ head = create(cfg['head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "head": head,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ fpn_feats = self.neck(body_feats)
+ head_outs = self.head(fpn_feats)
+ if not self.training:
+ bboxes, bbox_num = self.head.post_process(
+ head_outs, self.inputs['im_shape'], self.inputs['scale_factor'])
+ return bboxes, bbox_num
+ else:
+ loss = self.head.get_loss(head_outs, self.inputs)
+ return loss
+
+ def get_loss(self):
+ return self._forward()
+
+ def get_pred(self):
+ bbox_pred, bbox_num = self._forward()
+ output = {'bbox': bbox_pred, 'bbox_num': bbox_num}
+ return output
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/ttfnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/ttfnet.py
new file mode 100644
index 000000000..c3eb61c87
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/ttfnet.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['TTFNet']
+
+
+@register
+class TTFNet(BaseArch):
+ """
+ TTFNet network, see https://arxiv.org/abs/1909.00700
+
+ Args:
+ backbone (object): backbone instance
+ neck (object): 'TTFFPN' instance
+ ttf_head (object): 'TTFHead' instance
+ post_process (object): 'BBoxPostProcess' instance
+ """
+
+ __category__ = 'architecture'
+ __inject__ = ['post_process']
+
+ def __init__(self,
+ backbone='DarkNet',
+ neck='TTFFPN',
+ ttf_head='TTFHead',
+ post_process='BBoxPostProcess'):
+ super(TTFNet, self).__init__()
+ self.backbone = backbone
+ self.neck = neck
+ self.ttf_head = ttf_head
+ self.post_process = post_process
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ backbone = create(cfg['backbone'])
+
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = create(cfg['neck'], **kwargs)
+
+ kwargs = {'input_shape': neck.out_shape}
+ ttf_head = create(cfg['ttf_head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "ttf_head": ttf_head,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ body_feats = self.neck(body_feats)
+ hm, wh = self.ttf_head(body_feats)
+ if self.training:
+ return hm, wh
+ else:
+ bbox, bbox_num = self.post_process(hm, wh, self.inputs['im_shape'],
+ self.inputs['scale_factor'])
+ return bbox, bbox_num
+
+ def get_loss(self, ):
+ loss = {}
+ heatmap = self.inputs['ttf_heatmap']
+ box_target = self.inputs['ttf_box_target']
+ reg_weight = self.inputs['ttf_reg_weight']
+ hm, wh = self._forward()
+ head_loss = self.ttf_head.get_loss(hm, wh, heatmap, box_target,
+ reg_weight)
+ loss.update(head_loss)
+ total_loss = paddle.add_n(list(loss.values()))
+ loss.update({'loss': total_loss})
+ return loss
+
+ def get_pred(self):
+ bbox_pred, bbox_num = self._forward()
+ output = {
+ "bbox": bbox_pred,
+ "bbox_num": bbox_num,
+ }
+ return output
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/yolo.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/yolo.py
new file mode 100644
index 000000000..d5979e695
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/architectures/yolo.py
@@ -0,0 +1,124 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+from ..post_process import JDEBBoxPostProcess
+
+__all__ = ['YOLOv3']
+
+
+@register
+class YOLOv3(BaseArch):
+ __category__ = 'architecture'
+ __shared__ = ['data_format']
+ __inject__ = ['post_process']
+
+ def __init__(self,
+ backbone='DarkNet',
+ neck='YOLOv3FPN',
+ yolo_head='YOLOv3Head',
+ post_process='BBoxPostProcess',
+ data_format='NCHW',
+ for_mot=False):
+ """
+ YOLOv3 network, see https://arxiv.org/abs/1804.02767
+
+ Args:
+ backbone (nn.Layer): backbone instance
+ neck (nn.Layer): neck instance
+ yolo_head (nn.Layer): anchor_head instance
+            post_process (object): `BBoxPostProcess` instance
+            data_format (str): data format, NCHW or NHWC
+            for_mot (bool): whether to return extra features for multi-object
+                tracking models; default False for pure object detection models.
+ """
+ super(YOLOv3, self).__init__(data_format=data_format)
+ self.backbone = backbone
+ self.neck = neck
+ self.yolo_head = yolo_head
+ self.post_process = post_process
+ self.for_mot = for_mot
+ self.return_idx = isinstance(post_process, JDEBBoxPostProcess)
+
+ @classmethod
+ def from_config(cls, cfg, *args, **kwargs):
+ # backbone
+ backbone = create(cfg['backbone'])
+
+ # fpn
+ kwargs = {'input_shape': backbone.out_shape}
+ neck = create(cfg['neck'], **kwargs)
+
+ # head
+ kwargs = {'input_shape': neck.out_shape}
+ yolo_head = create(cfg['yolo_head'], **kwargs)
+
+ return {
+ 'backbone': backbone,
+ 'neck': neck,
+ "yolo_head": yolo_head,
+ }
+
+ def _forward(self):
+ body_feats = self.backbone(self.inputs)
+ neck_feats = self.neck(body_feats, self.for_mot)
+
+ if isinstance(neck_feats, dict):
+            assert self.for_mot
+ emb_feats = neck_feats['emb_feats']
+ neck_feats = neck_feats['yolo_feats']
+
+ if self.training:
+ yolo_losses = self.yolo_head(neck_feats, self.inputs)
+
+ if self.for_mot:
+ return {'det_losses': yolo_losses, 'emb_feats': emb_feats}
+ else:
+ return yolo_losses
+
+ else:
+ yolo_head_outs = self.yolo_head(neck_feats)
+
+ if self.for_mot:
+ boxes_idx, bbox, bbox_num, nms_keep_idx = self.post_process(
+ yolo_head_outs, self.yolo_head.mask_anchors)
+ output = {
+ 'bbox': bbox,
+ 'bbox_num': bbox_num,
+ 'boxes_idx': boxes_idx,
+ 'nms_keep_idx': nms_keep_idx,
+ 'emb_feats': emb_feats,
+ }
+ else:
+ if self.return_idx:
+ _, bbox, bbox_num, _ = self.post_process(
+ yolo_head_outs, self.yolo_head.mask_anchors)
+ else:
+ bbox, bbox_num = self.post_process(
+ yolo_head_outs, self.yolo_head.mask_anchors,
+ self.inputs['im_shape'], self.inputs['scale_factor'])
+ output = {'bbox': bbox, 'bbox_num': bbox_num}
+
+ return output
+
+ def get_loss(self):
+ return self._forward()
+
+ def get_pred(self):
+ return self._forward()
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__init__.py
new file mode 100644
index 000000000..be5bb04d3
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import utils
+from . import task_aligned_assigner
+from . import atss_assigner
+from . import simota_assigner
+
+from .utils import *
+from .task_aligned_assigner import *
+from .atss_assigner import *
+from .simota_assigner import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..ea40821c5
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/atss_assigner.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/atss_assigner.cpython-37.pyc
new file mode 100644
index 000000000..1cbaefccc
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/atss_assigner.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/simota_assigner.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/simota_assigner.cpython-37.pyc
new file mode 100644
index 000000000..359a50c2e
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/simota_assigner.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/task_aligned_assigner.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/task_aligned_assigner.cpython-37.pyc
new file mode 100644
index 000000000..55652436d
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/task_aligned_assigner.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/utils.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 000000000..e31eae2d2
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/__pycache__/utils.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/atss_assigner.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/atss_assigner.py
new file mode 100644
index 000000000..43e6ae2ab
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/atss_assigner.py
@@ -0,0 +1,209 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from ppdet.core.workspace import register
+from ..ops import iou_similarity
+from ..bbox_utils import bbox_center
+from .utils import (pad_gt, check_points_inside_bboxes, compute_max_iou_anchor,
+ compute_max_iou_gt)
+
+
+@register
+class ATSSAssigner(nn.Layer):
+ """Bridging the Gap Between Anchor-based and Anchor-free Detection
+ via Adaptive Training Sample Selection
+ """
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ topk=9,
+ num_classes=80,
+ force_gt_matching=False,
+ eps=1e-9):
+ super(ATSSAssigner, self).__init__()
+ self.topk = topk
+ self.num_classes = num_classes
+ self.force_gt_matching = force_gt_matching
+ self.eps = eps
+
+ def _gather_topk_pyramid(self, gt2anchor_distances, num_anchors_list,
+ pad_gt_mask):
+ pad_gt_mask = pad_gt_mask.tile([1, 1, self.topk]).astype(paddle.bool)
+ gt2anchor_distances_list = paddle.split(
+ gt2anchor_distances, num_anchors_list, axis=-1)
+ num_anchors_index = np.cumsum(num_anchors_list).tolist()
+ num_anchors_index = [0, ] + num_anchors_index[:-1]
+ is_in_topk_list = []
+ topk_idxs_list = []
+ for distances, anchors_index in zip(gt2anchor_distances_list,
+ num_anchors_index):
+ num_anchors = distances.shape[-1]
+ topk_metrics, topk_idxs = paddle.topk(
+ distances, self.topk, axis=-1, largest=False)
+ topk_idxs_list.append(topk_idxs + anchors_index)
+ topk_idxs = paddle.where(pad_gt_mask, topk_idxs,
+ paddle.zeros_like(topk_idxs))
+ is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2)
+ is_in_topk = paddle.where(is_in_topk > 1,
+ paddle.zeros_like(is_in_topk), is_in_topk)
+ is_in_topk_list.append(is_in_topk.astype(gt2anchor_distances.dtype))
+ is_in_topk_list = paddle.concat(is_in_topk_list, axis=-1)
+ topk_idxs_list = paddle.concat(topk_idxs_list, axis=-1)
+ return is_in_topk_list, topk_idxs_list
+
+ @paddle.no_grad()
+ def forward(self,
+ anchor_bboxes,
+ num_anchors_list,
+ gt_labels,
+ gt_bboxes,
+ bg_index,
+ gt_scores=None):
+ r"""This code is based on
+ https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/atss_assigner.py
+
+        The assignment is done in the following steps:
+        1. compute the iou between all bboxes (bboxes of all pyramid levels)
+           and gt
+        2. compute the center distance between all bboxes and gt
+        3. on each pyramid level, for each gt, select the k bboxes whose
+           centers are closest to the gt center, so we select k*l bboxes in
+           total as candidates for each gt
+        4. get the corresponding iou for these candidates, and compute the
+           mean and std; set mean + std as the iou threshold
+        5. select the candidates whose iou is greater than or equal to
+           the threshold as positives
+        6. limit the positive samples' centers to lie inside the gt
+        7. if an anchor box is assigned to multiple gts, the one with the
+           highest iou is selected.
+        Args:
+            anchor_bboxes (Tensor, float32): pre-defined anchors, shape(L, 4),
+                "xmin, ymin, xmax, ymax" format
+            num_anchors_list (List): number of anchors in each level
+            gt_labels (Tensor|List[Tensor], int64): labels of gt_bboxes, shape(B, n, 1)
+            gt_bboxes (Tensor|List[Tensor], float32): ground truth bboxes, shape(B, n, 4)
+            bg_index (int): background index
+            gt_scores (Tensor|List[Tensor]|None, float32): scores of gt_bboxes,
+                shape(B, n, 1); if None, it will be initialized with the one-hot label
+        Returns:
+            assigned_labels (Tensor): (B, L)
+            assigned_bboxes (Tensor): (B, L, 4)
+            assigned_scores (Tensor): (B, L, C)
+        """
+ gt_labels, gt_bboxes, pad_gt_scores, pad_gt_mask = pad_gt(
+ gt_labels, gt_bboxes, gt_scores)
+ assert gt_labels.ndim == gt_bboxes.ndim and \
+ gt_bboxes.ndim == 3
+
+ num_anchors, _ = anchor_bboxes.shape
+ batch_size, num_max_boxes, _ = gt_bboxes.shape
+
+ # negative batch
+ if num_max_boxes == 0:
+ assigned_labels = paddle.full([batch_size, num_anchors], bg_index)
+ assigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])
+ assigned_scores = paddle.zeros(
+ [batch_size, num_anchors, self.num_classes])
+ return assigned_labels, assigned_bboxes, assigned_scores
+
+ # 1. compute iou between gt and anchor bbox, [B, n, L]
+ ious = iou_similarity(gt_bboxes.reshape([-1, 4]), anchor_bboxes)
+ ious = ious.reshape([batch_size, -1, num_anchors])
+
+ # 2. compute center distance between all anchors and gt, [B, n, L]
+ gt_centers = bbox_center(gt_bboxes.reshape([-1, 4])).unsqueeze(1)
+ anchor_centers = bbox_center(anchor_bboxes)
+ gt2anchor_distances = (gt_centers - anchor_centers.unsqueeze(0)) \
+ .norm(2, axis=-1).reshape([batch_size, -1, num_anchors])
+
+ # 3. on each pyramid level, selecting topk closest candidates
+ # based on the center distance, [B, n, L]
+ is_in_topk, topk_idxs = self._gather_topk_pyramid(
+ gt2anchor_distances, num_anchors_list, pad_gt_mask)
+
+        # 4. get the corresponding iou for these candidates, and compute the
+        # mean and std; 5. set mean + std as the iou threshold
+ iou_candidates = ious * is_in_topk
+ iou_threshold = paddle.index_sample(
+ iou_candidates.flatten(stop_axis=-2),
+ topk_idxs.flatten(stop_axis=-2))
+ iou_threshold = iou_threshold.reshape([batch_size, num_max_boxes, -1])
+ iou_threshold = iou_threshold.mean(axis=-1, keepdim=True) + \
+ iou_threshold.std(axis=-1, keepdim=True)
+ is_in_topk = paddle.where(
+ iou_candidates > iou_threshold.tile([1, 1, num_anchors]),
+ is_in_topk, paddle.zeros_like(is_in_topk))
+
+ # 6. check the positive sample's center in gt, [B, n, L]
+ is_in_gts = check_points_inside_bboxes(anchor_centers, gt_bboxes)
+
+ # select positive sample, [B, n, L]
+ mask_positive = is_in_topk * is_in_gts * pad_gt_mask
+
+ # 7. if an anchor box is assigned to multiple gts,
+ # the one with the highest iou will be selected.
+ mask_positive_sum = mask_positive.sum(axis=-2)
+ if mask_positive_sum.max() > 1:
+ mask_multiple_gts = (mask_positive_sum.unsqueeze(1) > 1).tile(
+ [1, num_max_boxes, 1])
+ is_max_iou = compute_max_iou_anchor(ious)
+ mask_positive = paddle.where(mask_multiple_gts, is_max_iou,
+ mask_positive)
+ mask_positive_sum = mask_positive.sum(axis=-2)
+        # 8. make sure every gt_bbox matches an anchor
+ if self.force_gt_matching:
+ is_max_iou = compute_max_iou_gt(ious) * pad_gt_mask
+ mask_max_iou = (is_max_iou.sum(-2, keepdim=True) == 1).tile(
+ [1, num_max_boxes, 1])
+ mask_positive = paddle.where(mask_max_iou, is_max_iou,
+ mask_positive)
+ mask_positive_sum = mask_positive.sum(axis=-2)
+ assigned_gt_index = mask_positive.argmax(axis=-2)
+ assert mask_positive_sum.max() == 1, \
+ ("one anchor just assign one gt, but received not equals 1. "
+ "Received: %f" % mask_positive_sum.max().item())
+
+ # assigned target
+ batch_ind = paddle.arange(
+ end=batch_size, dtype=gt_labels.dtype).unsqueeze(-1)
+ assigned_gt_index = assigned_gt_index + batch_ind * num_max_boxes
+ assigned_labels = paddle.gather(
+ gt_labels.flatten(), assigned_gt_index.flatten(), axis=0)
+ assigned_labels = assigned_labels.reshape([batch_size, num_anchors])
+ assigned_labels = paddle.where(
+ mask_positive_sum > 0, assigned_labels,
+ paddle.full_like(assigned_labels, bg_index))
+
+ assigned_bboxes = paddle.gather(
+ gt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)
+ assigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])
+
+ assigned_scores = F.one_hot(assigned_labels, self.num_classes)
+ if gt_scores is not None:
+ gather_scores = paddle.gather(
+ pad_gt_scores.flatten(), assigned_gt_index.flatten(), axis=0)
+ gather_scores = gather_scores.reshape([batch_size, num_anchors])
+ gather_scores = paddle.where(mask_positive_sum > 0, gather_scores,
+ paddle.zeros_like(gather_scores))
+ assigned_scores *= gather_scores.unsqueeze(-1)
+
+ return assigned_labels, assigned_bboxes, assigned_scores
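+
+
+# A minimal usage sketch (illustrative only; it assumes the enclosing class is
+# ppdet's ATSSAssigner, constructed with topk=9 and num_classes=80, and uses
+# random tensors with the shapes documented above):
+#
+#   assigner = ATSSAssigner(topk=9, num_classes=80)
+#   anchor_bboxes = paddle.rand([100, 4])           # (L, 4), "xmin, ymin, xmax, ymax"
+#   gt_labels = paddle.randint(0, 80, [2, 3, 1])    # (B, n, 1)
+#   gt_bboxes = paddle.rand([2, 3, 4])              # (B, n, 4)
+#   labels, bboxes, scores = assigner(
+#       anchor_bboxes, [100], gt_labels, gt_bboxes, bg_index=80)
+#   # labels: (2, 100), bboxes: (2, 100, 4), scores: (2, 100, 80)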
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/simota_assigner.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/simota_assigner.py
new file mode 100644
index 000000000..4b34027e3
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/simota_assigner.py
@@ -0,0 +1,262 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The code is based on:
+# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/sim_ota_assigner.py
+
+import paddle
+import numpy as np
+import paddle.nn.functional as F
+
+from ppdet.modeling.losses.varifocal_loss import varifocal_loss
+from ppdet.modeling.bbox_utils import batch_bbox_overlaps
+from ppdet.core.workspace import register
+
+
+@register
+class SimOTAAssigner(object):
+ """Computes matching between predictions and ground truth.
+ Args:
+        center_radius (int | float, optional): Radius, in units of stride,
+            around the ground-truth center used to judge whether a prior is
+            in the center region. Default 2.5.
+        candidate_topk (int, optional): Number of top-iou candidates used to
+            compute the dynamic-k of each gt. Default 10.
+        iou_weight (int | float, optional): The scale factor for regression
+            iou cost. Default 3.0.
+        cls_weight (int | float, optional): The scale factor for classification
+            cost. Default 1.0.
+        num_classes (int): The number of classes in the dataset. Default 80.
+        use_vfl (bool): Whether to use varifocal_loss when calculating the cost
+            matrix. Default True.
+ """
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ center_radius=2.5,
+ candidate_topk=10,
+ iou_weight=3.0,
+ cls_weight=1.0,
+ num_classes=80,
+ use_vfl=True):
+ self.center_radius = center_radius
+ self.candidate_topk = candidate_topk
+ self.iou_weight = iou_weight
+ self.cls_weight = cls_weight
+ self.num_classes = num_classes
+ self.use_vfl = use_vfl
+
+ def get_in_gt_and_in_center_info(self, flatten_center_and_stride,
+ gt_bboxes):
+ num_gt = gt_bboxes.shape[0]
+
+ flatten_x = flatten_center_and_stride[:, 0].unsqueeze(1).tile(
+ [1, num_gt])
+ flatten_y = flatten_center_and_stride[:, 1].unsqueeze(1).tile(
+ [1, num_gt])
+ flatten_stride_x = flatten_center_and_stride[:, 2].unsqueeze(1).tile(
+ [1, num_gt])
+ flatten_stride_y = flatten_center_and_stride[:, 3].unsqueeze(1).tile(
+ [1, num_gt])
+
+ # is prior centers in gt bboxes, shape: [n_center, n_gt]
+ l_ = flatten_x - gt_bboxes[:, 0]
+ t_ = flatten_y - gt_bboxes[:, 1]
+ r_ = gt_bboxes[:, 2] - flatten_x
+ b_ = gt_bboxes[:, 3] - flatten_y
+
+ deltas = paddle.stack([l_, t_, r_, b_], axis=1)
+ is_in_gts = deltas.min(axis=1) > 0
+ is_in_gts_all = is_in_gts.sum(axis=1) > 0
+
+ # is prior centers in gt centers
+ gt_center_xs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
+ gt_center_ys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
+ ct_bound_l = gt_center_xs - self.center_radius * flatten_stride_x
+ ct_bound_t = gt_center_ys - self.center_radius * flatten_stride_y
+ ct_bound_r = gt_center_xs + self.center_radius * flatten_stride_x
+ ct_bound_b = gt_center_ys + self.center_radius * flatten_stride_y
+
+ cl_ = flatten_x - ct_bound_l
+ ct_ = flatten_y - ct_bound_t
+ cr_ = ct_bound_r - flatten_x
+ cb_ = ct_bound_b - flatten_y
+
+ ct_deltas = paddle.stack([cl_, ct_, cr_, cb_], axis=1)
+ is_in_cts = ct_deltas.min(axis=1) > 0
+ is_in_cts_all = is_in_cts.sum(axis=1) > 0
+
+ # in any of gts or gt centers, shape: [n_center]
+ is_in_gts_or_centers_all = paddle.logical_or(is_in_gts_all,
+ is_in_cts_all)
+
+ is_in_gts_or_centers_all_inds = paddle.nonzero(
+ is_in_gts_or_centers_all).squeeze(1)
+
+ # both in gts and gt centers, shape: [num_fg, num_gt]
+ is_in_gts_and_centers = paddle.logical_and(
+ paddle.gather(
+ is_in_gts.cast('int'), is_in_gts_or_centers_all_inds,
+ axis=0).cast('bool'),
+ paddle.gather(
+ is_in_cts.cast('int'), is_in_gts_or_centers_all_inds,
+ axis=0).cast('bool'))
+ return is_in_gts_or_centers_all, is_in_gts_or_centers_all_inds, is_in_gts_and_centers
+
+ def dynamic_k_matching(self, cost_matrix, pairwise_ious, num_gt):
+ match_matrix = np.zeros_like(cost_matrix.numpy())
+ # select candidate topk ious for dynamic-k calculation
+ topk_ious, _ = paddle.topk(pairwise_ious, self.candidate_topk, axis=0)
+ # calculate dynamic k for each gt
+ dynamic_ks = paddle.clip(topk_ious.sum(0).cast('int'), min=1)
+ for gt_idx in range(num_gt):
+ _, pos_idx = paddle.topk(
+ cost_matrix[:, gt_idx], k=dynamic_ks[gt_idx], largest=False)
+ match_matrix[:, gt_idx][pos_idx.numpy()] = 1.0
+
+ del topk_ious, dynamic_ks, pos_idx
+
+        # resolve points matched to more than one gt
+ extra_match_gts_mask = match_matrix.sum(1) > 1
+ if extra_match_gts_mask.sum() > 0:
+ cost_matrix = cost_matrix.numpy()
+ cost_argmin = np.argmin(
+ cost_matrix[extra_match_gts_mask, :], axis=1)
+ match_matrix[extra_match_gts_mask, :] *= 0.0
+ match_matrix[extra_match_gts_mask, cost_argmin] = 1.0
+ # get foreground mask
+ match_fg_mask_inmatrix = match_matrix.sum(1) > 0
+ match_gt_inds_to_fg = match_matrix[match_fg_mask_inmatrix, :].argmax(1)
+
+ return match_gt_inds_to_fg, match_fg_mask_inmatrix
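+
+    # Illustrative walk-through of dynamic-k (numbers are made up): with
+    # candidate_topk=10, if a gt's ten best IoUs are
+    # [0.8, 0.7, 0.6, 0.3, 0.2, 0.1, 0.1, 0.05, 0.05, 0.05], their sum is
+    # 2.95, so dynamic_k = 2 after the integer cast and clip to >= 1; the two
+    # lowest-cost candidates are then matched to that gt. Well-localized gts
+    # therefore receive more positive samples than poorly-covered ones.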
+
+ def get_sample(self, assign_gt_inds, gt_bboxes):
+ pos_inds = np.unique(np.nonzero(assign_gt_inds > 0)[0])
+ neg_inds = np.unique(np.nonzero(assign_gt_inds == 0)[0])
+ pos_assigned_gt_inds = assign_gt_inds[pos_inds] - 1
+
+ if gt_bboxes.size == 0:
+ # hack for index error case
+ assert pos_assigned_gt_inds.size == 0
+ pos_gt_bboxes = np.empty_like(gt_bboxes).reshape(-1, 4)
+ else:
+ if len(gt_bboxes.shape) < 2:
+                # np.ndarray.resize does not accept -1; reshape is what is intended
+                gt_bboxes = gt_bboxes.reshape(-1, 4)
+ pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds, :]
+ return pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds
+
+ def __call__(self,
+ flatten_cls_pred_scores,
+ flatten_center_and_stride,
+ flatten_bboxes,
+ gt_bboxes,
+ gt_labels,
+ eps=1e-7):
+ """Assign gt to priors using SimOTA.
+ TODO: add comment.
+ Returns:
+ assign_result: The assigned result.
+ """
+ num_gt = gt_bboxes.shape[0]
+ num_bboxes = flatten_bboxes.shape[0]
+
+ if num_gt == 0 or num_bboxes == 0:
+ # No ground truth or boxes
+ label = np.ones([num_bboxes], dtype=np.int64) * self.num_classes
+ label_weight = np.ones([num_bboxes], dtype=np.float32)
+ bbox_target = np.zeros_like(flatten_center_and_stride)
+ return 0, label, label_weight, bbox_target
+
+ is_in_gts_or_centers_all, is_in_gts_or_centers_all_inds, is_in_boxes_and_center = self.get_in_gt_and_in_center_info(
+ flatten_center_and_stride, gt_bboxes)
+
+ # bboxes and scores to calculate matrix
+ valid_flatten_bboxes = flatten_bboxes[is_in_gts_or_centers_all_inds]
+ valid_cls_pred_scores = flatten_cls_pred_scores[
+ is_in_gts_or_centers_all_inds]
+ num_valid_bboxes = valid_flatten_bboxes.shape[0]
+
+ pairwise_ious = batch_bbox_overlaps(valid_flatten_bboxes,
+ gt_bboxes) # [num_points,num_gts]
+ if self.use_vfl:
+ gt_vfl_labels = gt_labels.squeeze(-1).unsqueeze(0).tile(
+ [num_valid_bboxes, 1]).reshape([-1])
+ valid_pred_scores = valid_cls_pred_scores.unsqueeze(1).tile(
+ [1, num_gt, 1]).reshape([-1, self.num_classes])
+ vfl_score = np.zeros(valid_pred_scores.shape)
+ vfl_score[np.arange(0, vfl_score.shape[0]), gt_vfl_labels.numpy(
+ )] = pairwise_ious.reshape([-1])
+ vfl_score = paddle.to_tensor(vfl_score)
+ losses_vfl = varifocal_loss(
+ valid_pred_scores, vfl_score,
+ use_sigmoid=False).reshape([num_valid_bboxes, num_gt])
+ losses_giou = batch_bbox_overlaps(
+ valid_flatten_bboxes, gt_bboxes, mode='giou')
+ cost_matrix = (
+ losses_vfl * self.cls_weight + losses_giou * self.iou_weight +
+ paddle.logical_not(is_in_boxes_and_center).cast('float32') *
+ 100000000)
+ else:
+ iou_cost = -paddle.log(pairwise_ious + eps)
+ gt_onehot_label = (F.one_hot(
+ gt_labels.squeeze(-1).cast(paddle.int64),
+ flatten_cls_pred_scores.shape[-1]).cast('float32').unsqueeze(0)
+ .tile([num_valid_bboxes, 1, 1]))
+
+ valid_pred_scores = valid_cls_pred_scores.unsqueeze(1).tile(
+ [1, num_gt, 1])
+ cls_cost = F.binary_cross_entropy(
+ valid_pred_scores, gt_onehot_label, reduction='none').sum(-1)
+
+ cost_matrix = (
+ cls_cost * self.cls_weight + iou_cost * self.iou_weight +
+ paddle.logical_not(is_in_boxes_and_center).cast('float32') *
+ 100000000)
+
+ match_gt_inds_to_fg, match_fg_mask_inmatrix = \
+ self.dynamic_k_matching(
+ cost_matrix, pairwise_ious, num_gt)
+
+ # sample and assign results
+ assigned_gt_inds = np.zeros([num_bboxes], dtype=np.int64)
+ match_fg_mask_inall = np.zeros_like(assigned_gt_inds)
+ match_fg_mask_inall[is_in_gts_or_centers_all.numpy(
+ )] = match_fg_mask_inmatrix
+
+        # np.bool is deprecated (and later removed) in NumPy; the builtin
+        # bool is equivalent here
+        assigned_gt_inds[match_fg_mask_inall.astype(
+            bool)] = match_gt_inds_to_fg + 1
+
+ pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds \
+ = self.get_sample(assigned_gt_inds, gt_bboxes.numpy())
+
+ bbox_target = np.zeros_like(flatten_bboxes)
+ bbox_weight = np.zeros_like(flatten_bboxes)
+ label = np.ones([num_bboxes], dtype=np.int64) * self.num_classes
+ label_weight = np.zeros([num_bboxes], dtype=np.float32)
+
+ if len(pos_inds) > 0:
+ gt_labels = gt_labels.numpy()
+ pos_bbox_targets = pos_gt_bboxes
+ bbox_target[pos_inds, :] = pos_bbox_targets
+ bbox_weight[pos_inds, :] = 1.0
+ if not np.any(gt_labels):
+ label[pos_inds] = 0
+ else:
+ label[pos_inds] = gt_labels.squeeze(-1)[pos_assigned_gt_inds]
+
+ label_weight[pos_inds] = 1.0
+ if len(neg_inds) > 0:
+ label_weight[neg_inds] = 1.0
+
+ pos_num = max(pos_inds.size, 1)
+
+ return pos_num, label, label_weight, bbox_target
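+
+
+# A minimal usage sketch (illustrative; shapes follow the docstrings above and
+# all values are arbitrary):
+#
+#   assigner = SimOTAAssigner(num_classes=80)
+#   flatten_cls_pred_scores = paddle.rand([1000, 80])    # per-prior class scores
+#   flatten_center_and_stride = paddle.rand([1000, 4])   # (cx, cy, stride_x, stride_y)
+#   flatten_bboxes = paddle.rand([1000, 4]) * 100        # decoded "xyxy" boxes
+#   gt_bboxes = paddle.to_tensor([[10., 10., 50., 50.]])
+#   gt_labels = paddle.to_tensor([[3]], dtype='int64')
+#   pos_num, label, label_weight, bbox_target = assigner(
+#       flatten_cls_pred_scores, flatten_center_and_stride,
+#       flatten_bboxes, gt_bboxes, gt_labels)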
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/task_aligned_assigner.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/task_aligned_assigner.py
new file mode 100644
index 000000000..7e31c8afc
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/task_aligned_assigner.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from ppdet.core.workspace import register
+from ..bbox_utils import iou_similarity
+from .utils import (pad_gt, gather_topk_anchors, check_points_inside_bboxes,
+ compute_max_iou_anchor)
+
+
+@register
+class TaskAlignedAssigner(nn.Layer):
+ """TOOD: Task-aligned One-stage Object Detection
+ """
+
+ def __init__(self, topk=13, alpha=1.0, beta=6.0, eps=1e-9):
+ super(TaskAlignedAssigner, self).__init__()
+ self.topk = topk
+ self.alpha = alpha
+ self.beta = beta
+ self.eps = eps
+
+ @paddle.no_grad()
+ def forward(self,
+ pred_scores,
+ pred_bboxes,
+ anchor_points,
+ gt_labels,
+ gt_bboxes,
+ bg_index,
+ gt_scores=None):
+ r"""This code is based on
+ https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/task_aligned_assigner.py
+
+        The assignment is done in the following steps:
+        1. compute the alignment metric between all bboxes (bboxes of all
+            pyramid levels) and gt
+        2. select the top-k bboxes as candidates for each gt
+        3. limit the positive samples' centers to lie inside the gt (because an
+            anchor-free detector can only predict positive distances)
+        4. if an anchor box is assigned to multiple gts, the one with the
+            highest iou will be selected.
+ Args:
+ pred_scores (Tensor, float32): predicted class probability, shape(B, L, C)
+ pred_bboxes (Tensor, float32): predicted bounding boxes, shape(B, L, 4)
+ anchor_points (Tensor, float32): pre-defined anchors, shape(L, 2), "cxcy" format
+ gt_labels (Tensor|List[Tensor], int64): Label of gt_bboxes, shape(B, n, 1)
+ gt_bboxes (Tensor|List[Tensor], float32): Ground truth bboxes, shape(B, n, 4)
+ bg_index (int): background index
+            gt_scores (Tensor|List[Tensor]|None, float32): Score of gt_bboxes,
+                shape(B, n, 1); if None, it is initialized with the one-hot label
+ Returns:
+ assigned_labels (Tensor): (B, L)
+ assigned_bboxes (Tensor): (B, L, 4)
+ assigned_scores (Tensor): (B, L, C)
+ """
+ assert pred_scores.ndim == pred_bboxes.ndim
+
+ gt_labels, gt_bboxes, pad_gt_scores, pad_gt_mask = pad_gt(
+ gt_labels, gt_bboxes, gt_scores)
+ assert gt_labels.ndim == gt_bboxes.ndim and \
+ gt_bboxes.ndim == 3
+
+ batch_size, num_anchors, num_classes = pred_scores.shape
+ _, num_max_boxes, _ = gt_bboxes.shape
+
+ # negative batch
+ if num_max_boxes == 0:
+ assigned_labels = paddle.full([batch_size, num_anchors], bg_index)
+ assigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])
+ assigned_scores = paddle.zeros(
+ [batch_size, num_anchors, num_classes])
+ return assigned_labels, assigned_bboxes, assigned_scores
+
+ # compute iou between gt and pred bbox, [B, n, L]
+ ious = iou_similarity(gt_bboxes, pred_bboxes)
+ # gather pred bboxes class score
+ pred_scores = pred_scores.transpose([0, 2, 1])
+ batch_ind = paddle.arange(
+ end=batch_size, dtype=gt_labels.dtype).unsqueeze(-1)
+ gt_labels_ind = paddle.stack(
+ [batch_ind.tile([1, num_max_boxes]), gt_labels.squeeze(-1)],
+ axis=-1)
+ bbox_cls_scores = paddle.gather_nd(pred_scores, gt_labels_ind)
+ # compute alignment metrics, [B, n, L]
+ alignment_metrics = bbox_cls_scores.pow(self.alpha) * ious.pow(
+ self.beta)
+
+ # check the positive sample's center in gt, [B, n, L]
+ is_in_gts = check_points_inside_bboxes(anchor_points, gt_bboxes)
+
+ # select topk largest alignment metrics pred bbox as candidates
+ # for each gt, [B, n, L]
+ is_in_topk = gather_topk_anchors(
+ alignment_metrics * is_in_gts,
+ self.topk,
+ topk_mask=pad_gt_mask.tile([1, 1, self.topk]).astype(paddle.bool))
+
+ # select positive sample, [B, n, L]
+ mask_positive = is_in_topk * is_in_gts * pad_gt_mask
+
+ # if an anchor box is assigned to multiple gts,
+ # the one with the highest iou will be selected, [B, n, L]
+ mask_positive_sum = mask_positive.sum(axis=-2)
+ if mask_positive_sum.max() > 1:
+ mask_multiple_gts = (mask_positive_sum.unsqueeze(1) > 1).tile(
+ [1, num_max_boxes, 1])
+ is_max_iou = compute_max_iou_anchor(ious)
+ mask_positive = paddle.where(mask_multiple_gts, is_max_iou,
+ mask_positive)
+ mask_positive_sum = mask_positive.sum(axis=-2)
+ assigned_gt_index = mask_positive.argmax(axis=-2)
+ assert mask_positive_sum.max() == 1, \
+ ("one anchor just assign one gt, but received not equals 1. "
+ "Received: %f" % mask_positive_sum.max().item())
+
+ # assigned target
+ assigned_gt_index = assigned_gt_index + batch_ind * num_max_boxes
+ assigned_labels = paddle.gather(
+ gt_labels.flatten(), assigned_gt_index.flatten(), axis=0)
+ assigned_labels = assigned_labels.reshape([batch_size, num_anchors])
+ assigned_labels = paddle.where(
+ mask_positive_sum > 0, assigned_labels,
+ paddle.full_like(assigned_labels, bg_index))
+
+ assigned_bboxes = paddle.gather(
+ gt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)
+ assigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])
+
+ assigned_scores = F.one_hot(assigned_labels, num_classes)
+ # rescale alignment metrics
+ alignment_metrics *= mask_positive
+ max_metrics_per_instance = alignment_metrics.max(axis=-1, keepdim=True)
+ max_ious_per_instance = (ious * mask_positive).max(axis=-1,
+ keepdim=True)
+ alignment_metrics = alignment_metrics / (
+ max_metrics_per_instance + self.eps) * max_ious_per_instance
+ alignment_metrics = alignment_metrics.max(-2).unsqueeze(-1)
+ assigned_scores = assigned_scores * alignment_metrics
+
+ return assigned_labels, assigned_bboxes, assigned_scores
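+
+
+# A minimal usage sketch (illustrative; the assigner scores each prediction by
+# s^alpha * u^beta, where s is the predicted class score and u the IoU of the
+# predicted box, so the assignment tracks both tasks at once):
+#
+#   assigner = TaskAlignedAssigner(topk=13)
+#   pred_scores = paddle.rand([2, 100, 80])        # (B, L, C)
+#   pred_bboxes = paddle.rand([2, 100, 4])         # (B, L, 4)
+#   anchor_points = paddle.rand([100, 2])          # (L, 2), "cxcy"
+#   gt_labels = paddle.randint(0, 80, [2, 3, 1])   # (B, n, 1)
+#   gt_bboxes = paddle.rand([2, 3, 4])             # (B, n, 4)
+#   labels, bboxes, scores = assigner(pred_scores, pred_bboxes, anchor_points,
+#                                     gt_labels, gt_bboxes, bg_index=80)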
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/utils.py
new file mode 100644
index 000000000..3448d9d8a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/assigners/utils.py
@@ -0,0 +1,149 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn.functional as F
+
+
+def pad_gt(gt_labels, gt_bboxes, gt_scores=None):
+ r""" Pad 0 in gt_labels and gt_bboxes.
+ Args:
+ gt_labels (Tensor|List[Tensor], int64): Label of gt_bboxes,
+ shape is [B, n, 1] or [[n_1, 1], [n_2, 1], ...], here n = sum(n_i)
+ gt_bboxes (Tensor|List[Tensor], float32): Ground truth bboxes,
+ shape is [B, n, 4] or [[n_1, 4], [n_2, 4], ...], here n = sum(n_i)
+ gt_scores (Tensor|List[Tensor]|None, float32): Score of gt_bboxes,
+ shape is [B, n, 1] or [[n_1, 4], [n_2, 4], ...], here n = sum(n_i)
+ Returns:
+ pad_gt_labels (Tensor, int64): shape[B, n, 1]
+ pad_gt_bboxes (Tensor, float32): shape[B, n, 4]
+ pad_gt_scores (Tensor, float32): shape[B, n, 1]
+ pad_gt_mask (Tensor, float32): shape[B, n, 1], 1 means bbox, 0 means no bbox
+ """
+ if isinstance(gt_labels, paddle.Tensor) and isinstance(gt_bboxes,
+ paddle.Tensor):
+ assert gt_labels.ndim == gt_bboxes.ndim and \
+ gt_bboxes.ndim == 3
+ pad_gt_mask = (
+ gt_bboxes.sum(axis=-1, keepdim=True) > 0).astype(gt_bboxes.dtype)
+ if gt_scores is None:
+ gt_scores = pad_gt_mask.clone()
+ assert gt_labels.ndim == gt_scores.ndim
+
+ return gt_labels, gt_bboxes, gt_scores, pad_gt_mask
+ elif isinstance(gt_labels, list) and isinstance(gt_bboxes, list):
+ assert len(gt_labels) == len(gt_bboxes), \
+ 'The number of `gt_labels` and `gt_bboxes` is not equal. '
+ num_max_boxes = max([len(a) for a in gt_bboxes])
+ batch_size = len(gt_bboxes)
+ # pad label and bbox
+ pad_gt_labels = paddle.zeros(
+ [batch_size, num_max_boxes, 1], dtype=gt_labels[0].dtype)
+ pad_gt_bboxes = paddle.zeros(
+ [batch_size, num_max_boxes, 4], dtype=gt_bboxes[0].dtype)
+ pad_gt_scores = paddle.zeros(
+ [batch_size, num_max_boxes, 1], dtype=gt_bboxes[0].dtype)
+ pad_gt_mask = paddle.zeros(
+ [batch_size, num_max_boxes, 1], dtype=gt_bboxes[0].dtype)
+ for i, (label, bbox) in enumerate(zip(gt_labels, gt_bboxes)):
+ if len(label) > 0 and len(bbox) > 0:
+ pad_gt_labels[i, :len(label)] = label
+ pad_gt_bboxes[i, :len(bbox)] = bbox
+ pad_gt_mask[i, :len(bbox)] = 1.
+ if gt_scores is not None:
+ pad_gt_scores[i, :len(gt_scores[i])] = gt_scores[i]
+ if gt_scores is None:
+ pad_gt_scores = pad_gt_mask.clone()
+ return pad_gt_labels, pad_gt_bboxes, pad_gt_scores, pad_gt_mask
+ else:
+ raise ValueError('The input `gt_labels` or `gt_bboxes` is invalid! ')
+
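+# Illustrative example of the list branch (values are arbitrary): two images
+# with 2 and 1 boxes are padded to n = max(n_i) = 2, and the mask marks the
+# real rows:
+#
+#   labels = [paddle.to_tensor([[1], [2]]), paddle.to_tensor([[3]])]
+#   boxes = [paddle.rand([2, 4]), paddle.rand([1, 4])]
+#   pl, pb, ps, pm = pad_gt(labels, boxes)
+#   # pl: (2, 2, 1), pb: (2, 2, 4), pm[1] is [[1.], [0.]]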
+
+def gather_topk_anchors(metrics, topk, largest=True, topk_mask=None, eps=1e-9):
+ r"""
+ Args:
+ metrics (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors
+ topk (int): The number of top elements to look for along the axis.
+        largest (bool): if True, select the k largest entries (descending
+            order); otherwise select the k smallest (ascending order).
+            Default: True
+        topk_mask (Tensor, bool|None): shape[B, n, topk], mask of valid
+            (non-padded) gt boxes. Default: None
+ eps (float): Default: 1e-9
+ Returns:
+ is_in_topk (Tensor, float32): shape[B, n, L], value=1. means selected
+ """
+ num_anchors = metrics.shape[-1]
+ topk_metrics, topk_idxs = paddle.topk(
+ metrics, topk, axis=-1, largest=largest)
+ if topk_mask is None:
+ topk_mask = (topk_metrics.max(axis=-1, keepdim=True) > eps).tile(
+ [1, 1, topk])
+ topk_idxs = paddle.where(topk_mask, topk_idxs, paddle.zeros_like(topk_idxs))
+ is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2)
+ is_in_topk = paddle.where(is_in_topk > 1,
+ paddle.zeros_like(is_in_topk), is_in_topk)
+ return is_in_topk.astype(metrics.dtype)
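+
+# Illustrative example (arbitrary numbers): for metrics [[[0.9, 0.1, 0.8, 0.2]]]
+# and topk=2, anchors 0 and 2 carry the largest metrics, so the returned mask
+# is [[[1., 0., 1., 0.]]].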
+
+
+def check_points_inside_bboxes(points, bboxes, eps=1e-9):
+ r"""
+ Args:
+ points (Tensor, float32): shape[L, 2], "xy" format, L: num_anchors
+ bboxes (Tensor, float32): shape[B, n, 4], "xmin, ymin, xmax, ymax" format
+ eps (float): Default: 1e-9
+ Returns:
+ is_in_bboxes (Tensor, float32): shape[B, n, L], value=1. means selected
+ """
+ points = points.unsqueeze([0, 1])
+ x, y = points.chunk(2, axis=-1)
+ xmin, ymin, xmax, ymax = bboxes.unsqueeze(2).chunk(4, axis=-1)
+ l = x - xmin
+ t = y - ymin
+ r = xmax - x
+ b = ymax - y
+ bbox_ltrb = paddle.concat([l, t, r, b], axis=-1)
+ return (bbox_ltrb.min(axis=-1) > eps).astype(bboxes.dtype)
+
+
+def compute_max_iou_anchor(ious):
+ r"""
+ For each anchor, find the GT with the largest IOU.
+ Args:
+ ious (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors
+ Returns:
+ is_max_iou (Tensor, float32): shape[B, n, L], value=1. means selected
+ """
+ num_max_boxes = ious.shape[-2]
+ max_iou_index = ious.argmax(axis=-2)
+ is_max_iou = F.one_hot(max_iou_index, num_max_boxes).transpose([0, 2, 1])
+ return is_max_iou.astype(ious.dtype)
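+
+# Note on the trick used here: argmax over the gt axis gives, for each anchor,
+# the index of its best-overlapping gt; F.one_hot expands that into a
+# (B, L, n) indicator and the transpose restores the (B, n, L) layout used
+# throughout this module. compute_max_iou_gt below applies the same idea
+# along the anchor axis.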
+
+
+def compute_max_iou_gt(ious):
+ r"""
+ For each GT, find the anchor with the largest IOU.
+ Args:
+ ious (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors
+ Returns:
+ is_max_iou (Tensor, float32): shape[B, n, L], value=1. means selected
+ """
+ num_anchors = ious.shape[-1]
+ max_iou_index = ious.argmax(axis=-1)
+ is_max_iou = F.one_hot(max_iou_index, num_anchors)
+ return is_max_iou.astype(ious.dtype)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__init__.py
new file mode 100644
index 000000000..3f415e6a5
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__init__.py
@@ -0,0 +1,49 @@
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import vgg
+from . import resnet
+from . import darknet
+from . import mobilenet_v1
+from . import mobilenet_v3
+from . import hrnet
+from . import lite_hrnet
+from . import blazenet
+from . import ghostnet
+from . import senet
+from . import res2net
+from . import dla
+from . import shufflenet_v2
+from . import swin_transformer
+from . import lcnet
+from . import hardnet
+from . import esnet
+
+from .vgg import *
+from .resnet import *
+from .darknet import *
+from .mobilenet_v1 import *
+from .mobilenet_v3 import *
+from .hrnet import *
+from .lite_hrnet import *
+from .blazenet import *
+from .ghostnet import *
+from .senet import *
+from .res2net import *
+from .dla import *
+from .shufflenet_v2 import *
+from .swin_transformer import *
+from .lcnet import *
+from .hardnet import *
+from .esnet import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..065c620ad
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/blazenet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/blazenet.cpython-37.pyc
new file mode 100644
index 000000000..afc60e84c
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/blazenet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/darknet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/darknet.cpython-37.pyc
new file mode 100644
index 000000000..626ca3c65
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/darknet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/dla.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/dla.cpython-37.pyc
new file mode 100644
index 000000000..2319927be
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/dla.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/esnet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/esnet.cpython-37.pyc
new file mode 100644
index 000000000..dd0b7cd16
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/esnet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/ghostnet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/ghostnet.cpython-37.pyc
new file mode 100644
index 000000000..630894564
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/ghostnet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/hardnet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/hardnet.cpython-37.pyc
new file mode 100644
index 000000000..085823a44
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/hardnet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/hrnet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/hrnet.cpython-37.pyc
new file mode 100644
index 000000000..c610b5d31
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/hrnet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/lcnet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/lcnet.cpython-37.pyc
new file mode 100644
index 000000000..c4294309c
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/lcnet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/lite_hrnet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/lite_hrnet.cpython-37.pyc
new file mode 100644
index 000000000..abbfe056c
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/lite_hrnet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/mobilenet_v1.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/mobilenet_v1.cpython-37.pyc
new file mode 100644
index 000000000..5c53f4dd3
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/mobilenet_v1.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/mobilenet_v3.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/mobilenet_v3.cpython-37.pyc
new file mode 100644
index 000000000..4b589de4f
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/mobilenet_v3.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/name_adapter.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/name_adapter.cpython-37.pyc
new file mode 100644
index 000000000..8654f5ebe
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/name_adapter.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/res2net.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/res2net.cpython-37.pyc
new file mode 100644
index 000000000..a418868ba
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/res2net.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/resnet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/resnet.cpython-37.pyc
new file mode 100644
index 000000000..ba70f23d8
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/resnet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/senet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/senet.cpython-37.pyc
new file mode 100644
index 000000000..4fe1193c7
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/senet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/shufflenet_v2.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/shufflenet_v2.cpython-37.pyc
new file mode 100644
index 000000000..7fa8928fe
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/shufflenet_v2.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/swin_transformer.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/swin_transformer.cpython-37.pyc
new file mode 100644
index 000000000..c0bbf2d3e
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/swin_transformer.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/vgg.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/vgg.cpython-37.pyc
new file mode 100644
index 000000000..ff8f56ec1
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/__pycache__/vgg.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/blazenet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/blazenet.py
new file mode 100644
index 000000000..425f2a86e
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/blazenet.py
@@ -0,0 +1,322 @@
+# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import KaimingNormal
+from ppdet.core.workspace import register, serializable
+from ..shape_spec import ShapeSpec
+
+__all__ = ['BlazeNet']
+
+
+def hard_swish(x):
+ return x * F.relu6(x + 3) / 6.
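+
+
+# hard_swish(x) = x * ReLU6(x + 3) / 6, a cheap piecewise-linear stand-in for
+# x * sigmoid(x): it is exactly 0 for x <= -3 and approaches x for large
+# positive x. (Recent Paddle releases also provide F.hardswish with the same
+# formula; this local helper keeps the backbone self-contained.)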
+
+
+class ConvBNLayer(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ padding,
+ num_groups=1,
+ act='relu',
+ conv_lr=0.1,
+ conv_decay=0.,
+ norm_decay=0.,
+ norm_type='bn',
+ name=None):
+ super(ConvBNLayer, self).__init__()
+ self.act = act
+ self._conv = nn.Conv2D(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ groups=num_groups,
+ weight_attr=ParamAttr(
+ learning_rate=conv_lr, initializer=KaimingNormal()),
+ bias_attr=False)
+
+ if norm_type == 'sync_bn':
+ self._batch_norm = nn.SyncBatchNorm(out_channels)
+ else:
+ self._batch_norm = nn.BatchNorm(
+ out_channels, act=None, use_global_stats=False)
+
+ def forward(self, x):
+ x = self._conv(x)
+ x = self._batch_norm(x)
+ if self.act == "relu":
+ x = F.relu(x)
+ elif self.act == "relu6":
+ x = F.relu6(x)
+ elif self.act == 'leaky':
+ x = F.leaky_relu(x)
+ elif self.act == 'hard_swish':
+ x = hard_swish(x)
+ return x
+
+
+class BlazeBlock(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels1,
+ out_channels2,
+ double_channels=None,
+ stride=1,
+ use_5x5kernel=True,
+ act='relu',
+ name=None):
+ super(BlazeBlock, self).__init__()
+ assert stride in [1, 2]
+        self.use_pool = stride != 1
+ self.use_double_block = double_channels is not None
+ self.conv_dw = []
+ if use_5x5kernel:
+ self.conv_dw.append(
+ self.add_sublayer(
+ name + "1_dw",
+ ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels1,
+ kernel_size=5,
+ stride=stride,
+ padding=2,
+ num_groups=out_channels1,
+ name=name + "1_dw")))
+ else:
+ self.conv_dw.append(
+ self.add_sublayer(
+ name + "1_dw_1",
+ ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels1,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ num_groups=out_channels1,
+ name=name + "1_dw_1")))
+ self.conv_dw.append(
+ self.add_sublayer(
+ name + "1_dw_2",
+ ConvBNLayer(
+ in_channels=out_channels1,
+ out_channels=out_channels1,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ num_groups=out_channels1,
+ name=name + "1_dw_2")))
+ self.act = act if self.use_double_block else None
+ self.conv_pw = ConvBNLayer(
+ in_channels=out_channels1,
+ out_channels=out_channels2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ act=self.act,
+ name=name + "1_sep")
+ if self.use_double_block:
+ self.conv_dw2 = []
+ if use_5x5kernel:
+ self.conv_dw2.append(
+ self.add_sublayer(
+ name + "2_dw",
+ ConvBNLayer(
+ in_channels=out_channels2,
+ out_channels=out_channels2,
+ kernel_size=5,
+ stride=1,
+ padding=2,
+ num_groups=out_channels2,
+ name=name + "2_dw")))
+ else:
+ self.conv_dw2.append(
+ self.add_sublayer(
+ name + "2_dw_1",
+ ConvBNLayer(
+ in_channels=out_channels2,
+ out_channels=out_channels2,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ num_groups=out_channels2,
+ name=name + "1_dw_1")))
+ self.conv_dw2.append(
+ self.add_sublayer(
+ name + "2_dw_2",
+ ConvBNLayer(
+ in_channels=out_channels2,
+ out_channels=out_channels2,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ num_groups=out_channels2,
+ name=name + "2_dw_2")))
+ self.conv_pw2 = ConvBNLayer(
+ in_channels=out_channels2,
+ out_channels=double_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ name=name + "2_sep")
+ # shortcut
+ if self.use_pool:
+ shortcut_channel = double_channels or out_channels2
+ self._shortcut = []
+ self._shortcut.append(
+ self.add_sublayer(
+ name + '_shortcut_pool',
+ nn.MaxPool2D(
+ kernel_size=stride, stride=stride, ceil_mode=True)))
+ self._shortcut.append(
+ self.add_sublayer(
+ name + '_shortcut_conv',
+ ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=shortcut_channel,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ name="shortcut" + name)))
+
+ def forward(self, x):
+ y = x
+ for conv_dw_block in self.conv_dw:
+ y = conv_dw_block(y)
+ y = self.conv_pw(y)
+ if self.use_double_block:
+ for conv_dw2_block in self.conv_dw2:
+ y = conv_dw2_block(y)
+ y = self.conv_pw2(y)
+ if self.use_pool:
+ for shortcut in self._shortcut:
+ x = shortcut(x)
+ return F.relu(paddle.add(x, y))
+
+
+@register
+@serializable
+class BlazeNet(nn.Layer):
+ """
+ BlazeFace, see https://arxiv.org/abs/1907.05047
+
+ Args:
+        blaze_filters (list): number of filters for each blaze block.
+        double_blaze_filters (list): number of filters for each double_blaze block.
+        use_5x5kernel (bool): whether the depthwise conv uses a 5x5 kernel.
+ """
+
+ def __init__(
+ self,
+ blaze_filters=[[24, 24], [24, 24], [24, 48, 2], [48, 48], [48, 48]],
+ double_blaze_filters=[[48, 24, 96, 2], [96, 24, 96], [96, 24, 96],
+ [96, 24, 96, 2], [96, 24, 96], [96, 24, 96]],
+ use_5x5kernel=True,
+ act=None):
+ super(BlazeNet, self).__init__()
+ conv1_num_filters = blaze_filters[0][0]
+ self.conv1 = ConvBNLayer(
+ in_channels=3,
+ out_channels=conv1_num_filters,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ name="conv1")
+ in_channels = conv1_num_filters
+ self.blaze_block = []
+ self._out_channels = []
+ for k, v in enumerate(blaze_filters):
+ assert len(v) in [2, 3], \
+ "blaze_filters {} not in [2, 3]"
+ if len(v) == 2:
+ self.blaze_block.append(
+ self.add_sublayer(
+ 'blaze_{}'.format(k),
+ BlazeBlock(
+ in_channels,
+ v[0],
+ v[1],
+ use_5x5kernel=use_5x5kernel,
+ act=act,
+ name='blaze_{}'.format(k))))
+ elif len(v) == 3:
+ self.blaze_block.append(
+ self.add_sublayer(
+ 'blaze_{}'.format(k),
+ BlazeBlock(
+ in_channels,
+ v[0],
+ v[1],
+ stride=v[2],
+ use_5x5kernel=use_5x5kernel,
+ act=act,
+ name='blaze_{}'.format(k))))
+ in_channels = v[1]
+
+ for k, v in enumerate(double_blaze_filters):
+ assert len(v) in [3, 4], \
+ "blaze_filters {} not in [3, 4]"
+ if len(v) == 3:
+ self.blaze_block.append(
+ self.add_sublayer(
+ 'double_blaze_{}'.format(k),
+ BlazeBlock(
+ in_channels,
+ v[0],
+ v[1],
+ double_channels=v[2],
+ use_5x5kernel=use_5x5kernel,
+ act=act,
+ name='double_blaze_{}'.format(k))))
+ elif len(v) == 4:
+ self.blaze_block.append(
+ self.add_sublayer(
+ 'double_blaze_{}'.format(k),
+ BlazeBlock(
+ in_channels,
+ v[0],
+ v[1],
+ double_channels=v[2],
+ stride=v[3],
+ use_5x5kernel=use_5x5kernel,
+ act=act,
+ name='double_blaze_{}'.format(k))))
+ in_channels = v[2]
+ self._out_channels.append(in_channels)
+
+ def forward(self, inputs):
+ outs = []
+ y = self.conv1(inputs['image'])
+ for block in self.blaze_block:
+ y = block(y)
+ outs.append(y)
+ return [outs[-4], outs[-1]]
+
+ @property
+ def out_shape(self):
+ return [
+ ShapeSpec(channels=c)
+ for c in [self._out_channels[-4], self._out_channels[-1]]
+ ]
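+
+
+# A minimal shape-check sketch (illustrative; the backbone takes a dict with an
+# 'image' key and returns the two feature maps consumed by the BlazeFace head):
+#
+#   model = BlazeNet()
+#   feats = model({'image': paddle.rand([1, 3, 640, 640])})
+#   # under the default filter configs: feats[0] is (1, 96, 80, 80) at
+#   # stride 8 and feats[1] is (1, 96, 40, 40) at stride 16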
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/darknet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/darknet.py
new file mode 100644
index 000000000..246529699
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/darknet.py
@@ -0,0 +1,340 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from ppdet.core.workspace import register, serializable
+from ppdet.modeling.ops import batch_norm, mish
+from ..shape_spec import ShapeSpec
+
+__all__ = ['DarkNet', 'ConvBNLayer']
+
+
+class ConvBNLayer(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ filter_size=3,
+ stride=1,
+ groups=1,
+ padding=0,
+ norm_type='bn',
+ norm_decay=0.,
+ act="leaky",
+ freeze_norm=False,
+ data_format='NCHW',
+ name=''):
+ """
+ conv + bn + activation layer
+
+ Args:
+ ch_in (int): input channel
+ ch_out (int): output channel
+ filter_size (int): filter size, default 3
+ stride (int): stride, default 1
+ groups (int): number of groups of conv layer, default 1
+ padding (int): padding size, default 0
+ norm_type (str): batch norm type, default bn
+            norm_decay (float): decay for weight and bias of batch norm layer, default 0.
+ act (str): activation function type, default 'leaky', which means leaky_relu
+ freeze_norm (bool): whether to freeze norm, default False
+ data_format (str): data format, NCHW or NHWC
+ """
+ super(ConvBNLayer, self).__init__()
+
+ self.conv = nn.Conv2D(
+ in_channels=ch_in,
+ out_channels=ch_out,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=padding,
+ groups=groups,
+ data_format=data_format,
+ bias_attr=False)
+ self.batch_norm = batch_norm(
+ ch_out,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ data_format=data_format)
+ self.act = act
+
+ def forward(self, inputs):
+ out = self.conv(inputs)
+ out = self.batch_norm(out)
+ if self.act == 'leaky':
+ out = F.leaky_relu(out, 0.1)
+ elif self.act == 'mish':
+ out = mish(out)
+ return out
+
+
+class DownSample(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ filter_size=3,
+ stride=2,
+ padding=1,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ data_format='NCHW'):
+ """
+ downsample layer
+
+ Args:
+ ch_in (int): input channel
+ ch_out (int): output channel
+ filter_size (int): filter size, default 3
+ stride (int): stride, default 2
+ padding (int): padding size, default 1
+ norm_type (str): batch norm type, default bn
+            norm_decay (float): decay for weight and bias of batch norm layer, default 0.
+ freeze_norm (bool): whether to freeze norm, default False
+ data_format (str): data format, NCHW or NHWC
+ """
+
+ super(DownSample, self).__init__()
+
+ self.conv_bn_layer = ConvBNLayer(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ filter_size=filter_size,
+ stride=stride,
+ padding=padding,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ data_format=data_format)
+ self.ch_out = ch_out
+
+ def forward(self, inputs):
+ out = self.conv_bn_layer(inputs)
+ return out
+
+
+class BasicBlock(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ data_format='NCHW'):
+ """
+ BasicBlock layer of DarkNet
+
+ Args:
+ ch_in (int): input channel
+ ch_out (int): output channel
+ norm_type (str): batch norm type, default bn
+            norm_decay (float): decay for weight and bias of batch norm layer, default 0.
+ freeze_norm (bool): whether to freeze norm, default False
+ data_format (str): data format, NCHW or NHWC
+ """
+
+ super(BasicBlock, self).__init__()
+
+ self.conv1 = ConvBNLayer(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ data_format=data_format)
+ self.conv2 = ConvBNLayer(
+ ch_in=ch_out,
+ ch_out=ch_out * 2,
+ filter_size=3,
+ stride=1,
+ padding=1,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ data_format=data_format)
+
+ def forward(self, inputs):
+ conv1 = self.conv1(inputs)
+ conv2 = self.conv2(conv1)
+ out = paddle.add(x=inputs, y=conv2)
+ return out
+
+
+class Blocks(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ count,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ name=None,
+ data_format='NCHW'):
+ """
+        Blocks layer, which consists of several BasicBlock layers
+
+ Args:
+ ch_in (int): input channel
+ ch_out (int): output channel
+ count (int): number of BasicBlock layer
+ norm_type (str): batch norm type, default bn
+            norm_decay (float): decay for weight and bias of batch norm layer, default 0.
+ freeze_norm (bool): whether to freeze norm, default False
+ name (str): layer name
+ data_format (str): data format, NCHW or NHWC
+ """
+ super(Blocks, self).__init__()
+
+ self.basicblock0 = BasicBlock(
+ ch_in,
+ ch_out,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ data_format=data_format)
+ self.res_out_list = []
+ for i in range(1, count):
+ block_name = '{}.{}'.format(name, i)
+ res_out = self.add_sublayer(
+ block_name,
+ BasicBlock(
+ ch_out * 2,
+ ch_out,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ data_format=data_format))
+ self.res_out_list.append(res_out)
+ self.ch_out = ch_out
+
+ def forward(self, inputs):
+ y = self.basicblock0(inputs)
+ for basic_block_i in self.res_out_list:
+ y = basic_block_i(y)
+ return y
+
+
+DarkNet_cfg = {53: ([1, 2, 8, 8, 4])}
+
+
+@register
+@serializable
+class DarkNet(nn.Layer):
+ __shared__ = ['norm_type', 'data_format']
+
+ def __init__(self,
+ depth=53,
+ freeze_at=-1,
+ return_idx=[2, 3, 4],
+ num_stages=5,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ data_format='NCHW'):
+ """
+ Darknet, see https://pjreddie.com/darknet/yolo/
+
+ Args:
+ depth (int): depth of network
+ freeze_at (int): freeze the backbone at which stage
+            return_idx (list): index of stages whose feature maps are returned
+            num_stages (int): number of stages, default 5
+ norm_type (str): batch norm type, default bn
+            norm_decay (float): decay for weight and bias of batch norm layer, default 0.
+ data_format (str): data format, NCHW or NHWC
+ """
+ super(DarkNet, self).__init__()
+ self.depth = depth
+ self.freeze_at = freeze_at
+ self.return_idx = return_idx
+ self.num_stages = num_stages
+ self.stages = DarkNet_cfg[self.depth][0:num_stages]
+
+ self.conv0 = ConvBNLayer(
+ ch_in=3,
+ ch_out=32,
+ filter_size=3,
+ stride=1,
+ padding=1,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ data_format=data_format)
+
+ self.downsample0 = DownSample(
+ ch_in=32,
+ ch_out=32 * 2,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ data_format=data_format)
+
+ self._out_channels = []
+ self.darknet_conv_block_list = []
+ self.downsample_list = []
+ ch_in = [64, 128, 256, 512, 1024]
+ for i, stage in enumerate(self.stages):
+ name = 'stage.{}'.format(i)
+ conv_block = self.add_sublayer(
+ name,
+ Blocks(
+ int(ch_in[i]),
+ 32 * (2**i),
+ stage,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ data_format=data_format,
+ name=name))
+ self.darknet_conv_block_list.append(conv_block)
+ if i in return_idx:
+ self._out_channels.append(64 * (2**i))
+ for i in range(num_stages - 1):
+ down_name = 'stage.{}.downsample'.format(i)
+ downsample = self.add_sublayer(
+ down_name,
+ DownSample(
+ ch_in=32 * (2**(i + 1)),
+ ch_out=32 * (2**(i + 2)),
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ data_format=data_format))
+ self.downsample_list.append(downsample)
+
+ def forward(self, inputs):
+ x = inputs['image']
+
+ out = self.conv0(x)
+ out = self.downsample0(out)
+ blocks = []
+ for i, conv_block_i in enumerate(self.darknet_conv_block_list):
+ out = conv_block_i(out)
+ if i == self.freeze_at:
+ out.stop_gradient = True
+ if i in self.return_idx:
+ blocks.append(out)
+ if i < self.num_stages - 1:
+ out = self.downsample_list[i](out)
+ return blocks
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
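+
+
+# A minimal shape-check sketch (illustrative; DarkNet-53 with the default
+# return_idx=[2, 3, 4] yields the three feature maps used by YOLOv3):
+#
+#   model = DarkNet(depth=53)
+#   feats = model({'image': paddle.rand([1, 3, 416, 416])})
+#   # feats[0]: (1, 256, 52, 52), feats[1]: (1, 512, 26, 26),
+#   # feats[2]: (1, 1024, 13, 13)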
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/dla.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/dla.py
new file mode 100644
index 000000000..4ab06ab7f
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/dla.py
@@ -0,0 +1,243 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register, serializable
+from ppdet.modeling.layers import ConvNormLayer
+from ..shape_spec import ShapeSpec
+
+DLA_cfg = {34: ([1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512])}
+
+
+class BasicBlock(nn.Layer):
+ def __init__(self, ch_in, ch_out, stride=1):
+ super(BasicBlock, self).__init__()
+ self.conv1 = ConvNormLayer(
+ ch_in,
+ ch_out,
+ filter_size=3,
+ stride=stride,
+ bias_on=False,
+ norm_decay=None)
+ self.conv2 = ConvNormLayer(
+ ch_out,
+ ch_out,
+ filter_size=3,
+ stride=1,
+ bias_on=False,
+ norm_decay=None)
+
+ def forward(self, inputs, residual=None):
+ if residual is None:
+ residual = inputs
+
+ out = self.conv1(inputs)
+ out = F.relu(out)
+
+ out = self.conv2(out)
+
+ out = paddle.add(x=out, y=residual)
+ out = F.relu(out)
+
+ return out
+
+
+class Root(nn.Layer):
+ def __init__(self, ch_in, ch_out, kernel_size, residual):
+ super(Root, self).__init__()
+ self.conv = ConvNormLayer(
+ ch_in,
+ ch_out,
+ filter_size=1,
+ stride=1,
+ bias_on=False,
+ norm_decay=None)
+ self.residual = residual
+
+ def forward(self, inputs):
+ children = inputs
+ out = self.conv(paddle.concat(inputs, axis=1))
+ if self.residual:
+ out = paddle.add(x=out, y=children[0])
+ out = F.relu(out)
+
+ return out
+
+
+class Tree(nn.Layer):
+ def __init__(self,
+ level,
+ block,
+ ch_in,
+ ch_out,
+ stride=1,
+ level_root=False,
+ root_dim=0,
+ root_kernel_size=1,
+ root_residual=False):
+ super(Tree, self).__init__()
+ if root_dim == 0:
+ root_dim = 2 * ch_out
+ if level_root:
+ root_dim += ch_in
+ if level == 1:
+ self.tree1 = block(ch_in, ch_out, stride)
+ self.tree2 = block(ch_out, ch_out, 1)
+ else:
+ self.tree1 = Tree(
+ level - 1,
+ block,
+ ch_in,
+ ch_out,
+ stride,
+ root_dim=0,
+ root_kernel_size=root_kernel_size,
+ root_residual=root_residual)
+ self.tree2 = Tree(
+ level - 1,
+ block,
+ ch_out,
+ ch_out,
+ 1,
+ root_dim=root_dim + ch_out,
+ root_kernel_size=root_kernel_size,
+ root_residual=root_residual)
+
+ if level == 1:
+ self.root = Root(root_dim, ch_out, root_kernel_size, root_residual)
+ self.level_root = level_root
+ self.root_dim = root_dim
+ self.downsample = None
+ self.project = None
+ self.level = level
+ if stride > 1:
+ self.downsample = nn.MaxPool2D(stride, stride=stride)
+ if ch_in != ch_out:
+ self.project = ConvNormLayer(
+ ch_in,
+ ch_out,
+ filter_size=1,
+ stride=1,
+ bias_on=False,
+ norm_decay=None)
+
+ def forward(self, x, residual=None, children=None):
+ children = [] if children is None else children
+ bottom = self.downsample(x) if self.downsample else x
+ residual = self.project(bottom) if self.project else bottom
+ if self.level_root:
+ children.append(bottom)
+ x1 = self.tree1(x, residual)
+ if self.level == 1:
+ x2 = self.tree2(x1)
+ x = self.root([x2, x1] + children)
+ else:
+ children.append(x1)
+ x = self.tree2(x1, children=children)
+ return x
+
+
+@register
+@serializable
+class DLA(nn.Layer):
+ """
+ DLA, see https://arxiv.org/pdf/1707.06484.pdf
+
+ Args:
+ depth (int): DLA depth, should be 34.
+        residual_root (bool): whether to use a residual layer in the root block
+
+ """
+
+ def __init__(self, depth=34, residual_root=False):
+ super(DLA, self).__init__()
+ levels, channels = DLA_cfg[depth]
+ if depth == 34:
+ block = BasicBlock
+ self.channels = channels
+ self.base_layer = nn.Sequential(
+ ConvNormLayer(
+ 3,
+ channels[0],
+ filter_size=7,
+ stride=1,
+ bias_on=False,
+ norm_decay=None),
+ nn.ReLU())
+ self.level0 = self._make_conv_level(channels[0], channels[0], levels[0])
+ self.level1 = self._make_conv_level(
+ channels[0], channels[1], levels[1], stride=2)
+ self.level2 = Tree(
+ levels[2],
+ block,
+ channels[1],
+ channels[2],
+ 2,
+ level_root=False,
+ root_residual=residual_root)
+ self.level3 = Tree(
+ levels[3],
+ block,
+ channels[2],
+ channels[3],
+ 2,
+ level_root=True,
+ root_residual=residual_root)
+ self.level4 = Tree(
+ levels[4],
+ block,
+ channels[3],
+ channels[4],
+ 2,
+ level_root=True,
+ root_residual=residual_root)
+ self.level5 = Tree(
+ levels[5],
+ block,
+ channels[4],
+ channels[5],
+ 2,
+ level_root=True,
+ root_residual=residual_root)
+
+ def _make_conv_level(self, ch_in, ch_out, conv_num, stride=1):
+ modules = []
+ for i in range(conv_num):
+ modules.extend([
+ ConvNormLayer(
+ ch_in,
+ ch_out,
+ filter_size=3,
+ stride=stride if i == 0 else 1,
+ bias_on=False,
+ norm_decay=None), nn.ReLU()
+ ])
+ ch_in = ch_out
+ return nn.Sequential(*modules)
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=self.channels[i]) for i in range(6)]
+
+ def forward(self, inputs):
+ outs = []
+ im = inputs['image']
+ feats = self.base_layer(im)
+ for i in range(6):
+ feats = getattr(self, 'level{}'.format(i))(feats)
+ outs.append(feats)
+
+ return outs
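
The recursive `Tree` above doubles its block count with each level: a level-1 tree holds two `BasicBlock`s (`tree1` and `tree2`), and every additional level wraps two sub-trees. A minimal standalone sketch of how `DLA_cfg[34]`'s level list translates into block counts (plain Python, no ppdet imports; the helper name `blocks_in_tree` is ours):

```python
# Hypothetical helper, not part of ppdet: count the BasicBlocks a Tree of a
# given level instantiates. Tree(1) holds tree1 + tree2; deeper trees
# recurse into two sub-trees of level - 1.
def blocks_in_tree(level):
    if level == 1:
        return 2
    return 2 * blocks_in_tree(level - 1)

levels, channels = [1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512]
# level0/level1 are plain conv stacks; level2..level5 are Tree modules
for stage, lv in enumerate(levels[2:], start=2):
    print("level{}: {} BasicBlocks, {} out channels".format(
        stage, blocks_in_tree(lv), channels[stage]))
# level2: 2, level3: 4, level4: 4, level5: 2 -> 12 BasicBlocks in total
```
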
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/esnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/esnet.py
new file mode 100644
index 000000000..2b3f3c54a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/esnet.py
@@ -0,0 +1,290 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn import Conv2D, MaxPool2D, AdaptiveAvgPool2D, BatchNorm
+from paddle.nn.initializer import KaimingNormal
+from paddle.regularizer import L2Decay
+
+from ppdet.core.workspace import register, serializable
+from numbers import Integral
+from ..shape_spec import ShapeSpec
+from ppdet.modeling.ops import channel_shuffle
+from ppdet.modeling.backbones.shufflenet_v2 import ConvBNLayer
+
+__all__ = ['ESNet']
+
+
+def make_divisible(v, divisor=16, min_value=None):
+ if min_value is None:
+ min_value = divisor
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+ if new_v < 0.9 * v:
+ new_v += divisor
+ return new_v
+
+
+class SEModule(nn.Layer):
+ def __init__(self, channel, reduction=4):
+ super(SEModule, self).__init__()
+ self.avg_pool = AdaptiveAvgPool2D(1)
+ self.conv1 = Conv2D(
+ in_channels=channel,
+ out_channels=channel // reduction,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ weight_attr=ParamAttr(),
+ bias_attr=ParamAttr())
+ self.conv2 = Conv2D(
+ in_channels=channel // reduction,
+ out_channels=channel,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ weight_attr=ParamAttr(),
+ bias_attr=ParamAttr())
+
+ def forward(self, inputs):
+ outputs = self.avg_pool(inputs)
+ outputs = self.conv1(outputs)
+ outputs = F.relu(outputs)
+ outputs = self.conv2(outputs)
+ outputs = F.hardsigmoid(outputs)
+ return paddle.multiply(x=inputs, y=outputs)
+
+
+class InvertedResidual(nn.Layer):
+ def __init__(self,
+ in_channels,
+ mid_channels,
+ out_channels,
+ stride,
+ act="relu"):
+ super(InvertedResidual, self).__init__()
+ self._conv_pw = ConvBNLayer(
+ in_channels=in_channels // 2,
+ out_channels=mid_channels // 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act=act)
+ self._conv_dw = ConvBNLayer(
+ in_channels=mid_channels // 2,
+ out_channels=mid_channels // 2,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ groups=mid_channels // 2,
+ act=None)
+ self._se = SEModule(mid_channels)
+
+ self._conv_linear = ConvBNLayer(
+ in_channels=mid_channels,
+ out_channels=out_channels // 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act=act)
+
+ def forward(self, inputs):
+ x1, x2 = paddle.split(
+ inputs,
+ num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2],
+ axis=1)
+ x2 = self._conv_pw(x2)
+ x3 = self._conv_dw(x2)
+ x3 = paddle.concat([x2, x3], axis=1)
+ x3 = self._se(x3)
+ x3 = self._conv_linear(x3)
+ out = paddle.concat([x1, x3], axis=1)
+ return channel_shuffle(out, 2)
+
+
+class InvertedResidualDS(nn.Layer):
+ def __init__(self,
+ in_channels,
+ mid_channels,
+ out_channels,
+ stride,
+ act="relu"):
+ super(InvertedResidualDS, self).__init__()
+
+ # branch1
+ self._conv_dw_1 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=in_channels,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ groups=in_channels,
+ act=None)
+ self._conv_linear_1 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels // 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act=act)
+ # branch2
+ self._conv_pw_2 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=mid_channels // 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act=act)
+ self._conv_dw_2 = ConvBNLayer(
+ in_channels=mid_channels // 2,
+ out_channels=mid_channels // 2,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ groups=mid_channels // 2,
+ act=None)
+ self._se = SEModule(mid_channels // 2)
+ self._conv_linear_2 = ConvBNLayer(
+ in_channels=mid_channels // 2,
+ out_channels=out_channels // 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act=act)
+ self._conv_dw_mv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ groups=out_channels,
+ act="hard_swish")
+ self._conv_pw_mv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act="hard_swish")
+
+ def forward(self, inputs):
+ x1 = self._conv_dw_1(inputs)
+ x1 = self._conv_linear_1(x1)
+ x2 = self._conv_pw_2(inputs)
+ x2 = self._conv_dw_2(x2)
+ x2 = self._se(x2)
+ x2 = self._conv_linear_2(x2)
+ out = paddle.concat([x1, x2], axis=1)
+ out = self._conv_dw_mv1(out)
+ out = self._conv_pw_mv1(out)
+
+ return out
+
+
+@register
+@serializable
+class ESNet(nn.Layer):
+ def __init__(self,
+ scale=1.0,
+ act="hard_swish",
+ feature_maps=[4, 11, 14],
+ channel_ratio=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]):
+ super(ESNet, self).__init__()
+ self.scale = scale
+ if isinstance(feature_maps, Integral):
+ feature_maps = [feature_maps]
+ self.feature_maps = feature_maps
+ stage_repeats = [3, 7, 3]
+
+ stage_out_channels = [
+ -1, 24, make_divisible(128 * scale), make_divisible(256 * scale),
+ make_divisible(512 * scale), 1024
+ ]
+
+ self._out_channels = []
+ self._feature_idx = 0
+ # 1. conv1
+ self._conv1 = ConvBNLayer(
+ in_channels=3,
+ out_channels=stage_out_channels[1],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ act=act)
+ self._max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1)
+ self._feature_idx += 1
+
+ # 2. bottleneck sequences
+ self._block_list = []
+ arch_idx = 0
+ for stage_id, num_repeat in enumerate(stage_repeats):
+ for i in range(num_repeat):
+ channels_scales = channel_ratio[arch_idx]
+ mid_c = make_divisible(
+ int(stage_out_channels[stage_id + 2] * channels_scales),
+ divisor=8)
+ if i == 0:
+ block = self.add_sublayer(
+ name=str(stage_id + 2) + '_' + str(i + 1),
+ sublayer=InvertedResidualDS(
+ in_channels=stage_out_channels[stage_id + 1],
+ mid_channels=mid_c,
+ out_channels=stage_out_channels[stage_id + 2],
+ stride=2,
+ act=act))
+ else:
+ block = self.add_sublayer(
+ name=str(stage_id + 2) + '_' + str(i + 1),
+ sublayer=InvertedResidual(
+ in_channels=stage_out_channels[stage_id + 2],
+ mid_channels=mid_c,
+ out_channels=stage_out_channels[stage_id + 2],
+ stride=1,
+ act=act))
+ self._block_list.append(block)
+ arch_idx += 1
+ self._feature_idx += 1
+ self._update_out_channels(stage_out_channels[stage_id + 2],
+ self._feature_idx, self.feature_maps)
+
+ def _update_out_channels(self, channel, feature_idx, feature_maps):
+ if feature_idx in feature_maps:
+ self._out_channels.append(channel)
+
+ def forward(self, inputs):
+ y = self._conv1(inputs['image'])
+ y = self._max_pool(y)
+ outs = []
+ for i, inv in enumerate(self._block_list):
+ y = inv(y)
+ if i + 2 in self.feature_maps:
+ outs.append(y)
+
+ return outs
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
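
`make_divisible` keeps every scaled width hardware-friendly: the requested channel count is snapped to a multiple of `divisor` and bumped back up whenever rounding lost more than 10% of it. A quick standalone check (same function as above, copied so it runs without ppdet) of the stage widths ESNet derives for a few `scale` values:

```python
def make_divisible(v, divisor=16, min_value=None):
    # snap to a multiple of divisor, then bump up if the rounded value
    # dropped below 90% of the request
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

for scale in (0.5, 0.75, 1.0):
    # mirrors stage_out_channels in ESNet.__init__
    print(scale, [make_divisible(c * scale) for c in (128, 256, 512)])
# 0.5  -> [64, 128, 256]
# 0.75 -> [96, 192, 384]
# 1.0  -> [128, 256, 512]
```
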
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/ghostnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/ghostnet.py
new file mode 100644
index 000000000..cd333b4fe
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/ghostnet.py
@@ -0,0 +1,470 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import paddle
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import AdaptiveAvgPool2D, Linear
+from paddle.nn.initializer import Uniform
+
+from ppdet.core.workspace import register, serializable
+from numbers import Integral
+from ..shape_spec import ShapeSpec
+from .mobilenet_v3 import make_divisible, ConvBNLayer
+
+__all__ = ['GhostNet']
+
+
+class ExtraBlockDW(nn.Layer):
+ def __init__(self,
+ in_c,
+ ch_1,
+ ch_2,
+ stride,
+ lr_mult,
+ conv_decay=0.,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ name=None):
+ super(ExtraBlockDW, self).__init__()
+ self.pointwise_conv = ConvBNLayer(
+ in_c=in_c,
+ out_c=ch_1,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ act='relu6',
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_extra1")
+ self.depthwise_conv = ConvBNLayer(
+ in_c=ch_1,
+ out_c=ch_2,
+ filter_size=3,
+ stride=stride,
+            padding=1,
+ num_groups=int(ch_1),
+ act='relu6',
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_extra2_dw")
+ self.normal_conv = ConvBNLayer(
+ in_c=ch_2,
+ out_c=ch_2,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ act='relu6',
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_extra2_sep")
+
+ def forward(self, inputs):
+ x = self.pointwise_conv(inputs)
+ x = self.depthwise_conv(x)
+ x = self.normal_conv(x)
+ return x
+
+
+class SEBlock(nn.Layer):
+ def __init__(self, num_channels, lr_mult, reduction_ratio=4, name=None):
+ super(SEBlock, self).__init__()
+ self.pool2d_gap = AdaptiveAvgPool2D(1)
+ self._num_channels = num_channels
+ stdv = 1.0 / math.sqrt(num_channels * 1.0)
+ med_ch = num_channels // reduction_ratio
+ self.squeeze = Linear(
+ num_channels,
+ med_ch,
+ weight_attr=ParamAttr(
+ learning_rate=lr_mult, initializer=Uniform(-stdv, stdv)),
+ bias_attr=ParamAttr(learning_rate=lr_mult))
+ stdv = 1.0 / math.sqrt(med_ch * 1.0)
+ self.excitation = Linear(
+ med_ch,
+ num_channels,
+ weight_attr=ParamAttr(
+ learning_rate=lr_mult, initializer=Uniform(-stdv, stdv)),
+ bias_attr=ParamAttr(learning_rate=lr_mult))
+
+ def forward(self, inputs):
+ pool = self.pool2d_gap(inputs)
+ pool = paddle.squeeze(pool, axis=[2, 3])
+ squeeze = self.squeeze(pool)
+ squeeze = F.relu(squeeze)
+ excitation = self.excitation(squeeze)
+ excitation = paddle.clip(x=excitation, min=0, max=1)
+ excitation = paddle.unsqueeze(excitation, axis=[2, 3])
+ out = paddle.multiply(inputs, excitation)
+ return out
+
+
+class GhostModule(nn.Layer):
+ def __init__(self,
+ in_channels,
+ output_channels,
+ kernel_size=1,
+ ratio=2,
+ dw_size=3,
+ stride=1,
+ relu=True,
+ lr_mult=1.,
+ conv_decay=0.,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ name=None):
+ super(GhostModule, self).__init__()
+ init_channels = int(math.ceil(output_channels / ratio))
+ new_channels = int(init_channels * (ratio - 1))
+ self.primary_conv = ConvBNLayer(
+ in_c=in_channels,
+ out_c=init_channels,
+ filter_size=kernel_size,
+ stride=stride,
+ padding=int((kernel_size - 1) // 2),
+ num_groups=1,
+ act="relu" if relu else None,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_primary_conv")
+ self.cheap_operation = ConvBNLayer(
+ in_c=init_channels,
+ out_c=new_channels,
+ filter_size=dw_size,
+ stride=1,
+ padding=int((dw_size - 1) // 2),
+ num_groups=init_channels,
+ act="relu" if relu else None,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_cheap_operation")
+
+ def forward(self, inputs):
+ x = self.primary_conv(inputs)
+ y = self.cheap_operation(x)
+ out = paddle.concat([x, y], axis=1)
+ return out
+
+
+class GhostBottleneck(nn.Layer):
+ def __init__(self,
+ in_channels,
+ hidden_dim,
+ output_channels,
+ kernel_size,
+ stride,
+ use_se,
+ lr_mult,
+ conv_decay=0.,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ return_list=False,
+ name=None):
+ super(GhostBottleneck, self).__init__()
+ self._stride = stride
+ self._use_se = use_se
+ self._num_channels = in_channels
+ self._output_channels = output_channels
+ self.return_list = return_list
+
+ self.ghost_module_1 = GhostModule(
+ in_channels=in_channels,
+ output_channels=hidden_dim,
+ kernel_size=1,
+ stride=1,
+ relu=True,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_ghost_module_1")
+ if stride == 2:
+ self.depthwise_conv = ConvBNLayer(
+ in_c=hidden_dim,
+ out_c=hidden_dim,
+ filter_size=kernel_size,
+ stride=stride,
+ padding=int((kernel_size - 1) // 2),
+ num_groups=hidden_dim,
+ act=None,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+                # suffix repeated due to an old typo; kept as-is, to be fixed later
+                name=name + "_depthwise_depthwise")
+ if use_se:
+ self.se_block = SEBlock(hidden_dim, lr_mult, name=name + "_se")
+ self.ghost_module_2 = GhostModule(
+ in_channels=hidden_dim,
+ output_channels=output_channels,
+ kernel_size=1,
+ relu=False,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_ghost_module_2")
+ if stride != 1 or in_channels != output_channels:
+ self.shortcut_depthwise = ConvBNLayer(
+ in_c=in_channels,
+ out_c=in_channels,
+ filter_size=kernel_size,
+ stride=stride,
+ padding=int((kernel_size - 1) // 2),
+ num_groups=in_channels,
+ act=None,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+                # suffix repeated due to an old typo; kept as-is, to be fixed later
+                name=name + "_shortcut_depthwise_depthwise")
+ self.shortcut_conv = ConvBNLayer(
+ in_c=in_channels,
+ out_c=output_channels,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ num_groups=1,
+ act=None,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_shortcut_conv")
+
+ def forward(self, inputs):
+ y = self.ghost_module_1(inputs)
+ x = y
+ if self._stride == 2:
+ x = self.depthwise_conv(x)
+ if self._use_se:
+ x = self.se_block(x)
+ x = self.ghost_module_2(x)
+
+ if self._stride == 1 and self._num_channels == self._output_channels:
+ shortcut = inputs
+ else:
+ shortcut = self.shortcut_depthwise(inputs)
+ shortcut = self.shortcut_conv(shortcut)
+ x = paddle.add(x=x, y=shortcut)
+
+ if self.return_list:
+ return [y, x]
+ else:
+ return x
+
+
+@register
+@serializable
+class GhostNet(nn.Layer):
+ __shared__ = ['norm_type']
+
+ def __init__(
+ self,
+ scale=1.3,
+ feature_maps=[6, 12, 15],
+ with_extra_blocks=False,
+ extra_block_filters=[[256, 512], [128, 256], [128, 256], [64, 128]],
+ lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0],
+ conv_decay=0.,
+ norm_type='bn',
+ norm_decay=0.0,
+ freeze_norm=False):
+ super(GhostNet, self).__init__()
+ if isinstance(feature_maps, Integral):
+ feature_maps = [feature_maps]
+ if norm_type == 'sync_bn' and freeze_norm:
+ raise ValueError(
+ "The norm_type should not be sync_bn when freeze_norm is True")
+ self.feature_maps = feature_maps
+ self.with_extra_blocks = with_extra_blocks
+ self.extra_block_filters = extra_block_filters
+
+ inplanes = 16
+ self.cfgs = [
+ # k, t, c, SE, s
+ [3, 16, 16, 0, 1],
+ [3, 48, 24, 0, 2],
+ [3, 72, 24, 0, 1],
+ [5, 72, 40, 1, 2],
+ [5, 120, 40, 1, 1],
+ [3, 240, 80, 0, 2],
+ [3, 200, 80, 0, 1],
+ [3, 184, 80, 0, 1],
+ [3, 184, 80, 0, 1],
+ [3, 480, 112, 1, 1],
+ [3, 672, 112, 1, 1],
+ [5, 672, 160, 1, 2], # SSDLite output
+ [5, 960, 160, 0, 1],
+ [5, 960, 160, 1, 1],
+ [5, 960, 160, 0, 1],
+ [5, 960, 160, 1, 1]
+ ]
+ self.scale = scale
+ conv1_out_ch = int(make_divisible(inplanes * self.scale, 4))
+ self.conv1 = ConvBNLayer(
+ in_c=3,
+ out_c=conv1_out_ch,
+ filter_size=3,
+ stride=2,
+ padding=1,
+ num_groups=1,
+ act="relu",
+ lr_mult=1.,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name="conv1")
+
+ # build inverted residual blocks
+ self._out_channels = []
+ self.ghost_bottleneck_list = []
+ idx = 0
+ inplanes = conv1_out_ch
+ for k, exp_size, c, use_se, s in self.cfgs:
+ lr_idx = min(idx // 3, len(lr_mult_list) - 1)
+ lr_mult = lr_mult_list[lr_idx]
+
+ # for SSD/SSDLite, first head input is after ResidualUnit expand_conv
+ return_list = self.with_extra_blocks and idx + 2 in self.feature_maps
+
+ ghost_bottleneck = self.add_sublayer(
+ "_ghostbottleneck_" + str(idx),
+ sublayer=GhostBottleneck(
+ in_channels=inplanes,
+ hidden_dim=int(make_divisible(exp_size * self.scale, 4)),
+ output_channels=int(make_divisible(c * self.scale, 4)),
+ kernel_size=k,
+ stride=s,
+ use_se=use_se,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ return_list=return_list,
+ name="_ghostbottleneck_" + str(idx)))
+ self.ghost_bottleneck_list.append(ghost_bottleneck)
+ inplanes = int(make_divisible(c * self.scale, 4))
+ idx += 1
+ self._update_out_channels(
+ int(make_divisible(exp_size * self.scale, 4))
+ if return_list else inplanes, idx + 1, feature_maps)
+
+ if self.with_extra_blocks:
+ self.extra_block_list = []
+ extra_out_c = int(make_divisible(self.scale * self.cfgs[-1][1], 4))
+ lr_idx = min(idx // 3, len(lr_mult_list) - 1)
+ lr_mult = lr_mult_list[lr_idx]
+
+ conv_extra = self.add_sublayer(
+ "conv" + str(idx + 2),
+ sublayer=ConvBNLayer(
+ in_c=inplanes,
+ out_c=extra_out_c,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ num_groups=1,
+ act="relu6",
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name="conv" + str(idx + 2)))
+ self.extra_block_list.append(conv_extra)
+ idx += 1
+ self._update_out_channels(extra_out_c, idx + 1, feature_maps)
+
+ for j, block_filter in enumerate(self.extra_block_filters):
+                in_c = (extra_out_c if j == 0 else
+                        self.extra_block_filters[j - 1][1])
+ conv_extra = self.add_sublayer(
+ "conv" + str(idx + 2),
+ sublayer=ExtraBlockDW(
+ in_c,
+ block_filter[0],
+ block_filter[1],
+ stride=2,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name='conv' + str(idx + 2)))
+ self.extra_block_list.append(conv_extra)
+ idx += 1
+ self._update_out_channels(block_filter[1], idx + 1,
+ feature_maps)
+
+ def _update_out_channels(self, channel, feature_idx, feature_maps):
+ if feature_idx in feature_maps:
+ self._out_channels.append(channel)
+
+ def forward(self, inputs):
+ x = self.conv1(inputs['image'])
+ outs = []
+ for idx, ghost_bottleneck in enumerate(self.ghost_bottleneck_list):
+ x = ghost_bottleneck(x)
+ if idx + 2 in self.feature_maps:
+ if isinstance(x, list):
+ outs.append(x[0])
+ x = x[1]
+ else:
+ outs.append(x)
+
+ if not self.with_extra_blocks:
+ return outs
+
+ for i, block in enumerate(self.extra_block_list):
+ idx = i + len(self.ghost_bottleneck_list)
+ x = block(x)
+ if idx + 2 in self.feature_maps:
+ outs.append(x)
+ return outs
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
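
The `GhostModule` channel arithmetic is easiest to see in isolation: `primary_conv` produces `ceil(output_channels / ratio)` maps, the cheap depthwise conv derives `init_channels * (ratio - 1)` more from them, and the concat of the two is the module output. A short standalone sketch of the counts (no paddle needed; `ghost_channels` is our name):

```python
import math

def ghost_channels(output_channels, ratio=2):
    # mirrors the arithmetic in GhostModule.__init__ above
    init_channels = int(math.ceil(output_channels / ratio))
    new_channels = int(init_channels * (ratio - 1))
    return init_channels, new_channels, init_channels + new_channels

for c in (16, 24, 40, 80, 112):
    print(c, ghost_channels(c))
# with ratio=2 exactly half of each output comes from the cheap operation;
# odd requests round the concat up, e.g. 15 -> (8, 8, 16)
```
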
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/hardnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/hardnet.py
new file mode 100644
index 000000000..14a1599df
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/hardnet.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+from ppdet.core.workspace import register
+from ..shape_spec import ShapeSpec
+
+__all__ = ['HarDNet']
+
+
+def ConvLayer(in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ bias_attr=False):
+ layer = nn.Sequential(
+ ('conv', nn.Conv2D(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=kernel_size // 2,
+ groups=1,
+ bias_attr=bias_attr)), ('norm', nn.BatchNorm2D(out_channels)),
+ ('relu', nn.ReLU6()))
+ return layer
+
+
+def DWConvLayer(in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ bias_attr=False):
+ layer = nn.Sequential(
+ ('dwconv', nn.Conv2D(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=1,
+ groups=out_channels,
+ bias_attr=bias_attr)), ('norm', nn.BatchNorm2D(out_channels)))
+ return layer
+
+
+def CombConvLayer(in_channels, out_channels, kernel_size=1, stride=1):
+ layer = nn.Sequential(
+ ('layer1', ConvLayer(
+ in_channels, out_channels, kernel_size=kernel_size)),
+ ('layer2', DWConvLayer(
+ out_channels, out_channels, stride=stride)))
+ return layer
+
+
+class HarDBlock(nn.Layer):
+ def __init__(self,
+ in_channels,
+ growth_rate,
+ grmul,
+ n_layers,
+ keepBase=False,
+ residual_out=False,
+ dwconv=False):
+ super().__init__()
+ self.keepBase = keepBase
+ self.links = []
+ layers_ = []
+ self.out_channels = 0
+ for i in range(n_layers):
+ outch, inch, link = self.get_link(i + 1, in_channels, growth_rate,
+ grmul)
+ self.links.append(link)
+ if dwconv:
+ layers_.append(CombConvLayer(inch, outch))
+ else:
+ layers_.append(ConvLayer(inch, outch))
+
+ if (i % 2 == 0) or (i == n_layers - 1):
+ self.out_channels += outch
+ self.layers = nn.LayerList(layers_)
+
+ def get_out_ch(self):
+ return self.out_channels
+
+ def get_link(self, layer, base_ch, growth_rate, grmul):
+ if layer == 0:
+ return base_ch, 0, []
+ out_channels = growth_rate
+
+ link = []
+ for i in range(10):
+ dv = 2**i
+ if layer % dv == 0:
+ k = layer - dv
+ link.append(k)
+ if i > 0:
+ out_channels *= grmul
+
+ out_channels = int(int(out_channels + 1) / 2) * 2
+ in_channels = 0
+
+ for i in link:
+ ch, _, _ = self.get_link(i, base_ch, growth_rate, grmul)
+ in_channels += ch
+
+ return out_channels, in_channels, link
+
+ def forward(self, x):
+ layers_ = [x]
+
+ for layer in range(len(self.layers)):
+ link = self.links[layer]
+ tin = []
+ for i in link:
+ tin.append(layers_[i])
+ if len(tin) > 1:
+ x = paddle.concat(tin, 1)
+ else:
+ x = tin[0]
+ out = self.layers[layer](x)
+ layers_.append(out)
+
+ t = len(layers_)
+ out_ = []
+ for i in range(t):
+ if (i == 0 and self.keepBase) or (i == t - 1) or (i % 2 == 1):
+ out_.append(layers_[i])
+ out = paddle.concat(out_, 1)
+
+ return out
+
+
+@register
+class HarDNet(nn.Layer):
+ def __init__(self, depth_wise=False, return_idx=[1, 3, 8, 13], arch=85):
+ super(HarDNet, self).__init__()
+        assert arch in [68, 85], "HarDNet-{} is not supported.".format(arch)
+ if arch == 85:
+ first_ch = [48, 96]
+ second_kernel = 3
+ ch_list = [192, 256, 320, 480, 720]
+ grmul = 1.7
+ gr = [24, 24, 28, 36, 48]
+ n_layers = [8, 16, 16, 16, 16]
+ elif arch == 68:
+ first_ch = [32, 64]
+ second_kernel = 3
+ ch_list = [128, 256, 320, 640]
+ grmul = 1.7
+ gr = [14, 16, 20, 40]
+ n_layers = [8, 16, 16, 16]
+
+ self.return_idx = return_idx
+ self._out_channels = [96, 214, 458, 784]
+
+ avg_pool = True
+ if depth_wise:
+ second_kernel = 1
+ avg_pool = False
+
+ blks = len(n_layers)
+ self.base = nn.LayerList([])
+
+ # First Layer: Standard Conv3x3, Stride=2
+ self.base.append(
+ ConvLayer(
+ in_channels=3,
+ out_channels=first_ch[0],
+ kernel_size=3,
+ stride=2,
+ bias_attr=False))
+
+ # Second Layer
+ self.base.append(
+ ConvLayer(
+ first_ch[0], first_ch[1], kernel_size=second_kernel))
+
+ # Avgpooling or DWConv3x3 downsampling
+ if avg_pool:
+ self.base.append(nn.AvgPool2D(kernel_size=3, stride=2, padding=1))
+ else:
+ self.base.append(DWConvLayer(first_ch[1], first_ch[1], stride=2))
+
+ # Build all HarDNet blocks
+ ch = first_ch[1]
+ for i in range(blks):
+ blk = HarDBlock(ch, gr[i], grmul, n_layers[i], dwconv=depth_wise)
+ ch = blk.out_channels
+ self.base.append(blk)
+
+ if i != blks - 1:
+ self.base.append(ConvLayer(ch, ch_list[i], kernel_size=1))
+ ch = ch_list[i]
+ if i == 0:
+ self.base.append(
+ nn.AvgPool2D(
+ kernel_size=2, stride=2, ceil_mode=True))
+ elif i != blks - 1 and i != 1 and i != 3:
+ self.base.append(nn.AvgPool2D(kernel_size=2, stride=2))
+
+ def forward(self, inputs):
+ x = inputs['image']
+ outs = []
+ for i, layer in enumerate(self.base):
+ x = layer(x)
+ if i in self.return_idx:
+ outs.append(x)
+ return outs
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=self._out_channels[i]) for i in range(4)]
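
`HarDBlock.get_link` encodes the "harmonic" wiring that gives the network its name: layer L connects back to L - 1, L - 2, L - 4, ... for every power of two that divides L, and its width is multiplied by `grmul` for each extra link. The recurrence can be traced standalone (logic copied from the method above, minus the paddle parts):

```python
def get_link(layer, base_ch, growth_rate, grmul):
    # same recurrence as HarDBlock.get_link
    if layer == 0:
        return base_ch, 0, []
    out_channels = growth_rate
    link = []
    for i in range(10):
        dv = 2 ** i
        if layer % dv == 0:
            link.append(layer - dv)
            if i > 0:
                out_channels *= grmul
    out_channels = int(int(out_channels + 1) / 2) * 2  # round to even
    in_channels = sum(get_link(k, base_ch, growth_rate, grmul)[0]
                      for k in link)
    return out_channels, in_channels, link

# first HarDNet-68 block: base width 64, growth 14, grmul 1.7
for layer in range(1, 9):
    print(layer, get_link(layer, 64, 14, 1.7))
# layer 8 links back to [7, 6, 4, 0] and is the widest layer in the block
```
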
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/hrnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/hrnet.py
new file mode 100644
index 000000000..d92aa95f5
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/hrnet.py
@@ -0,0 +1,727 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import AdaptiveAvgPool2D, Linear
+from paddle.regularizer import L2Decay
+from paddle import ParamAttr
+from paddle.nn.initializer import Normal, Uniform
+from numbers import Integral
+import math
+
+from ppdet.core.workspace import register
+from ..shape_spec import ShapeSpec
+
+__all__ = ['HRNet']
+
+
+class ConvNormLayer(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ filter_size,
+ stride=1,
+ norm_type='bn',
+ norm_groups=32,
+ use_dcn=False,
+ norm_decay=0.,
+ freeze_norm=False,
+ act=None,
+ name=None):
+ super(ConvNormLayer, self).__init__()
+ assert norm_type in ['bn', 'sync_bn', 'gn']
+
+ self.act = act
+ self.conv = nn.Conv2D(
+ in_channels=ch_in,
+ out_channels=ch_out,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ groups=1,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=False)
+
+ norm_lr = 0. if freeze_norm else 1.
+
+ param_attr = ParamAttr(
+ learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
+ bias_attr = ParamAttr(
+ learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
+ global_stats = True if freeze_norm else False
+ if norm_type in ['bn', 'sync_bn']:
+ self.norm = nn.BatchNorm(
+ ch_out,
+ param_attr=param_attr,
+ bias_attr=bias_attr,
+ use_global_stats=global_stats)
+ elif norm_type == 'gn':
+ self.norm = nn.GroupNorm(
+ num_groups=norm_groups,
+ num_channels=ch_out,
+ weight_attr=param_attr,
+ bias_attr=bias_attr)
+ norm_params = self.norm.parameters()
+ if freeze_norm:
+ for param in norm_params:
+ param.stop_gradient = True
+
+ def forward(self, inputs):
+ out = self.conv(inputs)
+ out = self.norm(out)
+
+ if self.act == 'relu':
+ out = F.relu(out)
+ return out
+
+
+class Layer1(nn.Layer):
+ def __init__(self,
+ num_channels,
+ has_se=False,
+ norm_decay=0.,
+ freeze_norm=True,
+ name=None):
+ super(Layer1, self).__init__()
+
+ self.bottleneck_block_list = []
+
+ for i in range(4):
+ bottleneck_block = self.add_sublayer(
+ "block_{}_{}".format(name, i + 1),
+ BottleneckBlock(
+ num_channels=num_channels if i == 0 else 256,
+ num_filters=64,
+ has_se=has_se,
+ stride=1,
+ downsample=True if i == 0 else False,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + '_' + str(i + 1)))
+ self.bottleneck_block_list.append(bottleneck_block)
+
+ def forward(self, input):
+ conv = input
+ for block_func in self.bottleneck_block_list:
+ conv = block_func(conv)
+ return conv
+
+
+class TransitionLayer(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ norm_decay=0.,
+ freeze_norm=True,
+ name=None):
+ super(TransitionLayer, self).__init__()
+
+ num_in = len(in_channels)
+ num_out = len(out_channels)
+ self.conv_bn_func_list = []
+ for i in range(num_out):
+ residual = None
+ if i < num_in:
+ if in_channels[i] != out_channels[i]:
+ residual = self.add_sublayer(
+ "transition_{}_layer_{}".format(name, i + 1),
+ ConvNormLayer(
+ ch_in=in_channels[i],
+ ch_out=out_channels[i],
+ filter_size=3,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act='relu',
+ name=name + '_layer_' + str(i + 1)))
+ else:
+ residual = self.add_sublayer(
+ "transition_{}_layer_{}".format(name, i + 1),
+ ConvNormLayer(
+ ch_in=in_channels[-1],
+ ch_out=out_channels[i],
+ filter_size=3,
+ stride=2,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act='relu',
+ name=name + '_layer_' + str(i + 1)))
+ self.conv_bn_func_list.append(residual)
+
+ def forward(self, input):
+ outs = []
+ for idx, conv_bn_func in enumerate(self.conv_bn_func_list):
+ if conv_bn_func is None:
+ outs.append(input[idx])
+ else:
+ if idx < len(input):
+ outs.append(conv_bn_func(input[idx]))
+ else:
+ outs.append(conv_bn_func(input[-1]))
+ return outs
+
+
+class Branches(nn.Layer):
+ def __init__(self,
+ block_num,
+ in_channels,
+ out_channels,
+ has_se=False,
+ norm_decay=0.,
+ freeze_norm=True,
+ name=None):
+ super(Branches, self).__init__()
+
+ self.basic_block_list = []
+ for i in range(len(out_channels)):
+ self.basic_block_list.append([])
+ for j in range(block_num):
+ in_ch = in_channels[i] if j == 0 else out_channels[i]
+ basic_block_func = self.add_sublayer(
+ "bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1),
+ BasicBlock(
+ num_channels=in_ch,
+ num_filters=out_channels[i],
+ has_se=has_se,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + '_branch_layer_' + str(i + 1) + '_' +
+ str(j + 1)))
+ self.basic_block_list[i].append(basic_block_func)
+
+ def forward(self, inputs):
+ outs = []
+ for idx, input in enumerate(inputs):
+ conv = input
+ basic_block_list = self.basic_block_list[idx]
+ for basic_block_func in basic_block_list:
+ conv = basic_block_func(conv)
+ outs.append(conv)
+ return outs
+
+
+class BottleneckBlock(nn.Layer):
+ def __init__(self,
+ num_channels,
+ num_filters,
+ has_se,
+ stride=1,
+ downsample=False,
+ norm_decay=0.,
+ freeze_norm=True,
+ name=None):
+ super(BottleneckBlock, self).__init__()
+
+ self.has_se = has_se
+ self.downsample = downsample
+
+ self.conv1 = ConvNormLayer(
+ ch_in=num_channels,
+ ch_out=num_filters,
+ filter_size=1,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act="relu",
+ name=name + "_conv1")
+ self.conv2 = ConvNormLayer(
+ ch_in=num_filters,
+ ch_out=num_filters,
+ filter_size=3,
+ stride=stride,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act="relu",
+ name=name + "_conv2")
+ self.conv3 = ConvNormLayer(
+ ch_in=num_filters,
+ ch_out=num_filters * 4,
+ filter_size=1,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act=None,
+ name=name + "_conv3")
+
+ if self.downsample:
+ self.conv_down = ConvNormLayer(
+ ch_in=num_channels,
+ ch_out=num_filters * 4,
+ filter_size=1,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act=None,
+ name=name + "_downsample")
+
+ if self.has_se:
+ self.se = SELayer(
+ num_channels=num_filters * 4,
+ num_filters=num_filters * 4,
+ reduction_ratio=16,
+ name='fc' + name)
+
+ def forward(self, input):
+ residual = input
+ conv1 = self.conv1(input)
+ conv2 = self.conv2(conv1)
+ conv3 = self.conv3(conv2)
+
+ if self.downsample:
+ residual = self.conv_down(input)
+
+ if self.has_se:
+ conv3 = self.se(conv3)
+
+ y = paddle.add(x=residual, y=conv3)
+ y = F.relu(y)
+ return y
+
+
+class BasicBlock(nn.Layer):
+ def __init__(self,
+ num_channels,
+ num_filters,
+ stride=1,
+ has_se=False,
+ downsample=False,
+ norm_decay=0.,
+ freeze_norm=True,
+ name=None):
+ super(BasicBlock, self).__init__()
+
+ self.has_se = has_se
+ self.downsample = downsample
+ self.conv1 = ConvNormLayer(
+ ch_in=num_channels,
+ ch_out=num_filters,
+ filter_size=3,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ stride=stride,
+ act="relu",
+ name=name + "_conv1")
+ self.conv2 = ConvNormLayer(
+ ch_in=num_filters,
+ ch_out=num_filters,
+ filter_size=3,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ stride=1,
+ act=None,
+ name=name + "_conv2")
+
+ if self.downsample:
+ self.conv_down = ConvNormLayer(
+ ch_in=num_channels,
+ ch_out=num_filters * 4,
+ filter_size=1,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act=None,
+ name=name + "_downsample")
+
+ if self.has_se:
+ self.se = SELayer(
+ num_channels=num_filters,
+ num_filters=num_filters,
+ reduction_ratio=16,
+ name='fc' + name)
+
+ def forward(self, input):
+ residual = input
+ conv1 = self.conv1(input)
+ conv2 = self.conv2(conv1)
+
+ if self.downsample:
+ residual = self.conv_down(input)
+
+ if self.has_se:
+ conv2 = self.se(conv2)
+
+ y = paddle.add(x=residual, y=conv2)
+ y = F.relu(y)
+ return y
+
+
+class SELayer(nn.Layer):
+ def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
+ super(SELayer, self).__init__()
+
+ self.pool2d_gap = AdaptiveAvgPool2D(1)
+
+ self._num_channels = num_channels
+
+ med_ch = int(num_channels / reduction_ratio)
+ stdv = 1.0 / math.sqrt(num_channels * 1.0)
+ self.squeeze = Linear(
+ num_channels,
+ med_ch,
+ weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
+
+ stdv = 1.0 / math.sqrt(med_ch * 1.0)
+ self.excitation = Linear(
+ med_ch,
+ num_filters,
+ weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
+
+ def forward(self, input):
+ pool = self.pool2d_gap(input)
+ pool = paddle.squeeze(pool, axis=[2, 3])
+ squeeze = self.squeeze(pool)
+ squeeze = F.relu(squeeze)
+ excitation = self.excitation(squeeze)
+ excitation = F.sigmoid(excitation)
+ excitation = paddle.unsqueeze(excitation, axis=[2, 3])
+ out = input * excitation
+ return out
+
+
+class Stage(nn.Layer):
+ def __init__(self,
+ num_channels,
+ num_modules,
+ num_filters,
+ has_se=False,
+ norm_decay=0.,
+ freeze_norm=True,
+ multi_scale_output=True,
+ name=None):
+ super(Stage, self).__init__()
+
+ self._num_modules = num_modules
+ self.stage_func_list = []
+ for i in range(num_modules):
+ if i == num_modules - 1 and not multi_scale_output:
+ stage_func = self.add_sublayer(
+ "stage_{}_{}".format(name, i + 1),
+ HighResolutionModule(
+ num_channels=num_channels,
+ num_filters=num_filters,
+ has_se=has_se,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ multi_scale_output=False,
+ name=name + '_' + str(i + 1)))
+ else:
+ stage_func = self.add_sublayer(
+ "stage_{}_{}".format(name, i + 1),
+ HighResolutionModule(
+ num_channels=num_channels,
+ num_filters=num_filters,
+ has_se=has_se,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + '_' + str(i + 1)))
+
+ self.stage_func_list.append(stage_func)
+
+ def forward(self, input):
+ out = input
+ for idx in range(self._num_modules):
+ out = self.stage_func_list[idx](out)
+ return out
+
+
+class HighResolutionModule(nn.Layer):
+ def __init__(self,
+ num_channels,
+ num_filters,
+ has_se=False,
+ multi_scale_output=True,
+ norm_decay=0.,
+ freeze_norm=True,
+ name=None):
+ super(HighResolutionModule, self).__init__()
+ self.branches_func = Branches(
+ block_num=4,
+ in_channels=num_channels,
+ out_channels=num_filters,
+ has_se=has_se,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name)
+
+ self.fuse_func = FuseLayers(
+ in_channels=num_filters,
+ out_channels=num_filters,
+ multi_scale_output=multi_scale_output,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name)
+
+ def forward(self, input):
+ out = self.branches_func(input)
+ out = self.fuse_func(out)
+ return out
+
+
+class FuseLayers(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ multi_scale_output=True,
+ norm_decay=0.,
+ freeze_norm=True,
+ name=None):
+ super(FuseLayers, self).__init__()
+
+ self._actual_ch = len(in_channels) if multi_scale_output else 1
+ self._in_channels = in_channels
+
+ self.residual_func_list = []
+ for i in range(self._actual_ch):
+ for j in range(len(in_channels)):
+ residual_func = None
+ if j > i:
+ residual_func = self.add_sublayer(
+ "residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
+ ConvNormLayer(
+ ch_in=in_channels[j],
+ ch_out=out_channels[i],
+ filter_size=1,
+ stride=1,
+ act=None,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + '_layer_' + str(i + 1) + '_' +
+ str(j + 1)))
+ self.residual_func_list.append(residual_func)
+ elif j < i:
+ pre_num_filters = in_channels[j]
+ for k in range(i - j):
+ if k == i - j - 1:
+ residual_func = self.add_sublayer(
+ "residual_{}_layer_{}_{}_{}".format(
+ name, i + 1, j + 1, k + 1),
+ ConvNormLayer(
+ ch_in=pre_num_filters,
+ ch_out=out_channels[i],
+ filter_size=3,
+ stride=2,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act=None,
+ name=name + '_layer_' + str(i + 1) + '_' +
+ str(j + 1) + '_' + str(k + 1)))
+ pre_num_filters = out_channels[i]
+ else:
+ residual_func = self.add_sublayer(
+ "residual_{}_layer_{}_{}_{}".format(
+ name, i + 1, j + 1, k + 1),
+ ConvNormLayer(
+ ch_in=pre_num_filters,
+ ch_out=out_channels[j],
+ filter_size=3,
+ stride=2,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act="relu",
+ name=name + '_layer_' + str(i + 1) + '_' +
+ str(j + 1) + '_' + str(k + 1)))
+ pre_num_filters = out_channels[j]
+ self.residual_func_list.append(residual_func)
+
+ def forward(self, input):
+ outs = []
+ residual_func_idx = 0
+ for i in range(self._actual_ch):
+ residual = input[i]
+ for j in range(len(self._in_channels)):
+ if j > i:
+ y = self.residual_func_list[residual_func_idx](input[j])
+ residual_func_idx += 1
+ y = F.interpolate(y, scale_factor=2**(j - i))
+ residual = paddle.add(x=residual, y=y)
+ elif j < i:
+ y = input[j]
+ for k in range(i - j):
+ y = self.residual_func_list[residual_func_idx](y)
+ residual_func_idx += 1
+
+ residual = paddle.add(x=residual, y=y)
+ residual = F.relu(residual)
+ outs.append(residual)
+
+ return outs
+
+
+@register
+class HRNet(nn.Layer):
+ """
+ HRNet, see https://arxiv.org/abs/1908.07919
+
+ Args:
+ width (int): the width of HRNet
+ has_se (bool): whether to add SE block for each stage
+ freeze_at (int): the stage to freeze
+ freeze_norm (bool): whether to freeze norm in HRNet
+ norm_decay (float): weight decay for normalization layer weights
+ return_idx (List): the stage to return
+ upsample (bool): whether to upsample and concat the backbone feats
+ """
+
+ def __init__(self,
+ width=18,
+ has_se=False,
+ freeze_at=0,
+ freeze_norm=True,
+ norm_decay=0.,
+ return_idx=[0, 1, 2, 3],
+ upsample=False):
+ super(HRNet, self).__init__()
+
+ self.width = width
+ self.has_se = has_se
+ if isinstance(return_idx, Integral):
+ return_idx = [return_idx]
+
+        assert len(return_idx) > 0, "need one or more return indices"
+ self.freeze_at = freeze_at
+ self.return_idx = return_idx
+ self.upsample = upsample
+
+ self.channels = {
+ 18: [[18, 36], [18, 36, 72], [18, 36, 72, 144]],
+ 30: [[30, 60], [30, 60, 120], [30, 60, 120, 240]],
+ 32: [[32, 64], [32, 64, 128], [32, 64, 128, 256]],
+ 40: [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
+ 44: [[44, 88], [44, 88, 176], [44, 88, 176, 352]],
+ 48: [[48, 96], [48, 96, 192], [48, 96, 192, 384]],
+ 60: [[60, 120], [60, 120, 240], [60, 120, 240, 480]],
+ 64: [[64, 128], [64, 128, 256], [64, 128, 256, 512]]
+ }
+
+ channels_2, channels_3, channels_4 = self.channels[width]
+ num_modules_2, num_modules_3, num_modules_4 = 1, 4, 3
+ self._out_channels = [sum(channels_4)] if self.upsample else channels_4
+ self._out_strides = [4] if self.upsample else [4, 8, 16, 32]
+
+ self.conv_layer1_1 = ConvNormLayer(
+ ch_in=3,
+ ch_out=64,
+ filter_size=3,
+ stride=2,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act='relu',
+ name="layer1_1")
+
+ self.conv_layer1_2 = ConvNormLayer(
+ ch_in=64,
+ ch_out=64,
+ filter_size=3,
+ stride=2,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ act='relu',
+ name="layer1_2")
+
+ self.la1 = Layer1(
+ num_channels=64,
+ has_se=has_se,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name="layer2")
+
+ self.tr1 = TransitionLayer(
+ in_channels=[256],
+ out_channels=channels_2,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name="tr1")
+
+ self.st2 = Stage(
+ num_channels=channels_2,
+ num_modules=num_modules_2,
+ num_filters=channels_2,
+ has_se=self.has_se,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name="st2")
+
+ self.tr2 = TransitionLayer(
+ in_channels=channels_2,
+ out_channels=channels_3,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name="tr2")
+
+ self.st3 = Stage(
+ num_channels=channels_3,
+ num_modules=num_modules_3,
+ num_filters=channels_3,
+ has_se=self.has_se,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name="st3")
+
+ self.tr3 = TransitionLayer(
+ in_channels=channels_3,
+ out_channels=channels_4,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name="tr3")
+ self.st4 = Stage(
+ num_channels=channels_4,
+ num_modules=num_modules_4,
+ num_filters=channels_4,
+ has_se=self.has_se,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ multi_scale_output=len(return_idx) > 1,
+ name="st4")
+
+ def forward(self, inputs):
+ x = inputs['image']
+ conv1 = self.conv_layer1_1(x)
+ conv2 = self.conv_layer1_2(conv1)
+
+ la1 = self.la1(conv2)
+ tr1 = self.tr1([la1])
+ st2 = self.st2(tr1)
+ tr2 = self.tr2(st2)
+
+ st3 = self.st3(tr2)
+ tr3 = self.tr3(st3)
+
+ st4 = self.st4(tr3)
+
+ if self.upsample:
+ # Upsampling
+ x0_h, x0_w = st4[0].shape[2:4]
+ x1 = F.upsample(st4[1], size=(x0_h, x0_w), mode='bilinear')
+ x2 = F.upsample(st4[2], size=(x0_h, x0_w), mode='bilinear')
+ x3 = F.upsample(st4[3], size=(x0_h, x0_w), mode='bilinear')
+ x = paddle.concat([st4[0], x1, x2, x3], 1)
+ return x
+
+ res = []
+ for i, layer in enumerate(st4):
+ if i == self.freeze_at:
+ layer.stop_gradient = True
+ if i in self.return_idx:
+ res.append(layer)
+
+ return res
+
+ @property
+ def out_shape(self):
+ if self.upsample:
+ self.return_idx = [0]
+ return [
+ ShapeSpec(
+ channels=self._out_channels[i], stride=self._out_strides[i])
+ for i in self.return_idx
+ ]
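
HRNet's output contract depends on `upsample`. With `upsample=False` it returns up to four maps at strides 4/8/16/32 whose widths come from the `self.channels[width]` table; with `upsample=True` the three coarser maps are bilinearly resized to the stride-4 resolution and concatenated, so `out_shape` collapses to a single entry. A small standalone illustration (the table below is a subset of the one in `__init__`):

```python
channels = {
    18: [[18, 36], [18, 36, 72], [18, 36, 72, 144]],
    32: [[32, 64], [32, 64, 128], [32, 64, 128, 256]],
    48: [[48, 96], [48, 96, 192], [48, 96, 192, 384]],
}

for width, (_, _, c4) in channels.items():
    # upsample=False: one map per branch, (channels, stride) pairs
    print(width, "multi-scale:", list(zip(c4, [4, 8, 16, 32])))
    # upsample=True: everything fused at stride 4
    print(width, "fused:", (sum(c4), 4))
# e.g. HRNet-W18 fused yields a single 270-channel, stride-4 map
```
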
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/lcnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/lcnet.py
new file mode 100644
index 000000000..fd8ad4e46
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/lcnet.py
@@ -0,0 +1,258 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+from paddle import ParamAttr
+from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Dropout, Linear
+from paddle.regularizer import L2Decay
+from paddle.nn.initializer import KaimingNormal
+
+from ppdet.core.workspace import register, serializable
+from numbers import Integral
+from ..shape_spec import ShapeSpec
+
+__all__ = ['LCNet']
+
+NET_CONFIG = {
+ "blocks2":
+ #k, in_c, out_c, s, use_se
+ [[3, 16, 32, 1, False], ],
+ "blocks3": [
+ [3, 32, 64, 2, False],
+ [3, 64, 64, 1, False],
+ ],
+ "blocks4": [
+ [3, 64, 128, 2, False],
+ [3, 128, 128, 1, False],
+ ],
+ "blocks5": [
+ [3, 128, 256, 2, False],
+ [5, 256, 256, 1, False],
+ [5, 256, 256, 1, False],
+ [5, 256, 256, 1, False],
+ [5, 256, 256, 1, False],
+ [5, 256, 256, 1, False],
+ ],
+ "blocks6": [[5, 256, 512, 2, True], [5, 512, 512, 1, True]]
+}
+
+
+def make_divisible(v, divisor=8, min_value=None):
+ if min_value is None:
+ min_value = divisor
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+ if new_v < 0.9 * v:
+ new_v += divisor
+ return new_v
+
+
+class ConvBNLayer(nn.Layer):
+ def __init__(self,
+ num_channels,
+ filter_size,
+ num_filters,
+ stride,
+ num_groups=1):
+ super().__init__()
+
+ self.conv = Conv2D(
+ in_channels=num_channels,
+ out_channels=num_filters,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ groups=num_groups,
+ weight_attr=ParamAttr(initializer=KaimingNormal()),
+ bias_attr=False)
+
+ self.bn = BatchNorm(
+ num_filters,
+ param_attr=ParamAttr(regularizer=L2Decay(0.0)),
+ bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
+ self.hardswish = nn.Hardswish()
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.bn(x)
+ x = self.hardswish(x)
+ return x
+
+
+class DepthwiseSeparable(nn.Layer):
+ def __init__(self,
+ num_channels,
+ num_filters,
+ stride,
+ dw_size=3,
+ use_se=False):
+ super().__init__()
+ self.use_se = use_se
+ self.dw_conv = ConvBNLayer(
+ num_channels=num_channels,
+ num_filters=num_channels,
+ filter_size=dw_size,
+ stride=stride,
+ num_groups=num_channels)
+ if use_se:
+ self.se = SEModule(num_channels)
+ self.pw_conv = ConvBNLayer(
+ num_channels=num_channels,
+ filter_size=1,
+ num_filters=num_filters,
+ stride=1)
+
+ def forward(self, x):
+ x = self.dw_conv(x)
+ if self.use_se:
+ x = self.se(x)
+ x = self.pw_conv(x)
+ return x
+
+
+class SEModule(nn.Layer):
+ def __init__(self, channel, reduction=4):
+ super().__init__()
+ self.avg_pool = AdaptiveAvgPool2D(1)
+ self.conv1 = Conv2D(
+ in_channels=channel,
+ out_channels=channel // reduction,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+ self.relu = nn.ReLU()
+ self.conv2 = Conv2D(
+ in_channels=channel // reduction,
+ out_channels=channel,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+ self.hardsigmoid = nn.Hardsigmoid()
+
+ def forward(self, x):
+ identity = x
+ x = self.avg_pool(x)
+ x = self.conv1(x)
+ x = self.relu(x)
+ x = self.conv2(x)
+ x = self.hardsigmoid(x)
+ x = paddle.multiply(x=identity, y=x)
+ return x
+
+
+@register
+@serializable
+class LCNet(nn.Layer):
+ def __init__(self, scale=1.0, feature_maps=[3, 4, 5]):
+ super().__init__()
+ self.scale = scale
+ self.feature_maps = feature_maps
+
+ out_channels = []
+
+ self.conv1 = ConvBNLayer(
+ num_channels=3,
+ filter_size=3,
+ num_filters=make_divisible(16 * scale),
+ stride=2)
+
+        self.blocks2 = nn.Sequential(*[
+ DepthwiseSeparable(
+ num_channels=make_divisible(in_c * scale),
+ num_filters=make_divisible(out_c * scale),
+ dw_size=k,
+ stride=s,
+ use_se=se)
+ for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks2"])
+ ])
+
+        self.blocks3 = nn.Sequential(*[
+ DepthwiseSeparable(
+ num_channels=make_divisible(in_c * scale),
+ num_filters=make_divisible(out_c * scale),
+ dw_size=k,
+ stride=s,
+ use_se=se)
+ for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks3"])
+ ])
+
+ out_channels.append(
+ make_divisible(NET_CONFIG["blocks3"][-1][2] * scale))
+
+        self.blocks4 = nn.Sequential(*[
+ DepthwiseSeparable(
+ num_channels=make_divisible(in_c * scale),
+ num_filters=make_divisible(out_c * scale),
+ dw_size=k,
+ stride=s,
+ use_se=se)
+ for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks4"])
+ ])
+
+ out_channels.append(
+ make_divisible(NET_CONFIG["blocks4"][-1][2] * scale))
+
+        self.blocks5 = nn.Sequential(*[
+ DepthwiseSeparable(
+ num_channels=make_divisible(in_c * scale),
+ num_filters=make_divisible(out_c * scale),
+ dw_size=k,
+ stride=s,
+ use_se=se)
+ for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks5"])
+ ])
+
+ out_channels.append(
+ make_divisible(NET_CONFIG["blocks5"][-1][2] * scale))
+
+        self.blocks6 = nn.Sequential(*[
+ DepthwiseSeparable(
+ num_channels=make_divisible(in_c * scale),
+ num_filters=make_divisible(out_c * scale),
+ dw_size=k,
+ stride=s,
+ use_se=se)
+ for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks6"])
+ ])
+
+ out_channels.append(
+ make_divisible(NET_CONFIG["blocks6"][-1][2] * scale))
+ self._out_channels = [
+ ch for idx, ch in enumerate(out_channels) if idx + 2 in feature_maps
+ ]
+
+ def forward(self, inputs):
+ x = inputs['image']
+ outs = []
+
+ x = self.conv1(x)
+ x = self.blocks2(x)
+ x = self.blocks3(x)
+ outs.append(x)
+ x = self.blocks4(x)
+ outs.append(x)
+ x = self.blocks5(x)
+ outs.append(x)
+ x = self.blocks6(x)
+ outs.append(x)
+ outs = [o for i, o in enumerate(outs) if i + 2 in self.feature_maps]
+ return outs
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
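
Note the off-by-two indexing shared by `forward` and `_out_channels`: `outs` collects the outputs of `blocks3`..`blocks6` (indices 0..3), and stage `i` is kept when `i + 2` appears in `feature_maps`, so the default `[3, 4, 5]` selects `blocks4`, `blocks5` and `blocks6`. A standalone sketch of the resulting `out_shape` channels (the helper name is ours):

```python
def lcnet_out_channels(scale=1.0, feature_maps=(3, 4, 5)):
    def make_divisible(v, divisor=8):
        new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
        return new_v + divisor if new_v < 0.9 * v else new_v
    # last out_c of blocks3..blocks6 in NET_CONFIG, before scaling
    stage_out = [64, 128, 256, 512]
    return [make_divisible(c * scale)
            for i, c in enumerate(stage_out) if i + 2 in feature_maps]

print(lcnet_out_channels())                    # [128, 256, 512]
print(lcnet_out_channels(0.5, (2, 3, 4, 5)))   # [32, 64, 128, 256]
```
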
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/lite_hrnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/lite_hrnet.py
new file mode 100644
index 000000000..f14aae8e2
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/lite_hrnet.py
@@ -0,0 +1,881 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from numbers import Integral
+from paddle import ParamAttr
+from paddle.regularizer import L2Decay
+from paddle.nn.initializer import Normal, Constant
+from ppdet.core.workspace import register
+from ppdet.modeling.shape_spec import ShapeSpec
+from ppdet.modeling.ops import channel_shuffle
+from .. import layers as L
+
+__all__ = ['LiteHRNet']
+
+
+class ConvNormLayer(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ filter_size,
+ stride=1,
+ groups=1,
+ norm_type=None,
+ norm_groups=32,
+ norm_decay=0.,
+ freeze_norm=False,
+ act=None):
+ super(ConvNormLayer, self).__init__()
+ self.act = act
+ norm_lr = 0. if freeze_norm else 1.
+ if norm_type is not None:
+ assert norm_type in ['bn', 'sync_bn', 'gn'],\
+ "norm_type should be one of ['bn', 'sync_bn', 'gn'], but got {}".format(norm_type)
+ param_attr = ParamAttr(
+ initializer=Constant(1.0),
+ learning_rate=norm_lr,
+ regularizer=L2Decay(norm_decay), )
+ bias_attr = ParamAttr(
+ learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
+ global_stats = True if freeze_norm else False
+ if norm_type in ['bn', 'sync_bn']:
+ self.norm = nn.BatchNorm(
+ ch_out,
+ param_attr=param_attr,
+ bias_attr=bias_attr,
+ use_global_stats=global_stats, )
+ elif norm_type == 'gn':
+ self.norm = nn.GroupNorm(
+ num_groups=norm_groups,
+ num_channels=ch_out,
+ weight_attr=param_attr,
+ bias_attr=bias_attr)
+ norm_params = self.norm.parameters()
+ if freeze_norm:
+ for param in norm_params:
+ param.stop_gradient = True
+ conv_bias_attr = False
+ else:
+ conv_bias_attr = True
+ self.norm = None
+
+ self.conv = nn.Conv2D(
+ in_channels=ch_in,
+ out_channels=ch_out,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.001)),
+ bias_attr=conv_bias_attr)
+
+ def forward(self, inputs):
+ out = self.conv(inputs)
+ if self.norm is not None:
+ out = self.norm(out)
+
+ if self.act == 'relu':
+ out = F.relu(out)
+ elif self.act == 'sigmoid':
+ out = F.sigmoid(out)
+ return out
+
+
+class DepthWiseSeparableConvNormLayer(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ filter_size,
+ stride=1,
+ dw_norm_type=None,
+ pw_norm_type=None,
+ norm_decay=0.,
+ freeze_norm=False,
+ dw_act=None,
+ pw_act=None):
+ super(DepthWiseSeparableConvNormLayer, self).__init__()
+ self.depthwise_conv = ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=ch_in,
+ filter_size=filter_size,
+ stride=stride,
+ groups=ch_in,
+ norm_type=dw_norm_type,
+ act=dw_act,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm, )
+ self.pointwise_conv = ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ filter_size=1,
+ stride=1,
+ norm_type=pw_norm_type,
+ act=pw_act,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm, )
+
+ def forward(self, x):
+ x = self.depthwise_conv(x)
+ x = self.pointwise_conv(x)
+ return x
+
+
+class CrossResolutionWeightingModule(nn.Layer):
+ def __init__(self,
+ channels,
+ ratio=16,
+ norm_type='bn',
+ freeze_norm=False,
+ norm_decay=0.):
+ super(CrossResolutionWeightingModule, self).__init__()
+ self.channels = channels
+ total_channel = sum(channels)
+ self.conv1 = ConvNormLayer(
+ ch_in=total_channel,
+ ch_out=total_channel // ratio,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ act='relu',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+ self.conv2 = ConvNormLayer(
+ ch_in=total_channel // ratio,
+ ch_out=total_channel,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ act='sigmoid',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+
+ def forward(self, x):
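+        # Pool every branch down to the smallest branch's resolution, run the
+        # concatenated channels through a squeeze/excite-style 1x1 bottleneck
+        # (relu then sigmoid), then split the resulting weights per branch and
+        # upsample them back to reweight each input.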
+ mini_size = x[-1].shape[-2:]
+ out = [F.adaptive_avg_pool2d(s, mini_size) for s in x[:-1]] + [x[-1]]
+ out = paddle.concat(out, 1)
+ out = self.conv1(out)
+ out = self.conv2(out)
+ out = paddle.split(out, self.channels, 1)
+ out = [
+ s * F.interpolate(
+ a, s.shape[-2:], mode='nearest') for s, a in zip(x, out)
+ ]
+ return out
+
+
+class SpatialWeightingModule(nn.Layer):
+ def __init__(self, in_channel, ratio=16, freeze_norm=False, norm_decay=0.):
+ super(SpatialWeightingModule, self).__init__()
+ self.global_avgpooling = nn.AdaptiveAvgPool2D(1)
+ self.conv1 = ConvNormLayer(
+ ch_in=in_channel,
+ ch_out=in_channel // ratio,
+ filter_size=1,
+ stride=1,
+ act='relu',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+ self.conv2 = ConvNormLayer(
+ ch_in=in_channel // ratio,
+ ch_out=in_channel,
+ filter_size=1,
+ stride=1,
+ act='sigmoid',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+
+ def forward(self, x):
+ out = self.global_avgpooling(x)
+ out = self.conv1(out)
+ out = self.conv2(out)
+ return x * out
+
+
+class ConditionalChannelWeightingBlock(nn.Layer):
+ def __init__(self,
+ in_channels,
+ stride,
+ reduce_ratio,
+ norm_type='bn',
+ freeze_norm=False,
+ norm_decay=0.):
+ super(ConditionalChannelWeightingBlock, self).__init__()
+ assert stride in [1, 2]
+ branch_channels = [channel // 2 for channel in in_channels]
+
+ self.cross_resolution_weighting = CrossResolutionWeightingModule(
+ branch_channels,
+ ratio=reduce_ratio,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+ self.depthwise_convs = nn.LayerList([
+ ConvNormLayer(
+ channel,
+ channel,
+ filter_size=3,
+ stride=stride,
+ groups=channel,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay) for channel in branch_channels
+ ])
+
+ self.spatial_weighting = nn.LayerList([
+ SpatialWeightingModule(
+ channel,
+ ratio=4,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay) for channel in branch_channels
+ ])
+
+ def forward(self, x):
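+        # Split each branch's channels in half: the first half passes through
+        # untouched, the second half gets cross-resolution weighting, a
+        # depthwise conv and spatial weighting; the halves are then re-joined
+        # and channel-shuffled, ShuffleNet-style.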
+ x = [s.chunk(2, axis=1) for s in x]
+ x1 = [s[0] for s in x]
+ x2 = [s[1] for s in x]
+
+ x2 = self.cross_resolution_weighting(x2)
+ x2 = [dw(s) for s, dw in zip(x2, self.depthwise_convs)]
+ x2 = [sw(s) for s, sw in zip(x2, self.spatial_weighting)]
+
+ out = [paddle.concat([s1, s2], axis=1) for s1, s2 in zip(x1, x2)]
+ out = [channel_shuffle(s, groups=2) for s in out]
+ return out
+
+
+class ShuffleUnit(nn.Layer):
+ def __init__(self,
+ in_channel,
+ out_channel,
+ stride,
+ norm_type='bn',
+ freeze_norm=False,
+ norm_decay=0.):
+ super(ShuffleUnit, self).__init__()
+ branch_channel = out_channel // 2
+ self.stride = stride
+ if self.stride == 1:
+            assert in_channel == branch_channel * 2,\
+                "when stride=1, in_channel {} should equal branch_channel * 2 ({})".format(in_channel, branch_channel * 2)
+ if stride > 1:
+ self.branch1 = nn.Sequential(
+ ConvNormLayer(
+ ch_in=in_channel,
+ ch_out=in_channel,
+ filter_size=3,
+ stride=self.stride,
+ groups=in_channel,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay),
+ ConvNormLayer(
+ ch_in=in_channel,
+ ch_out=branch_channel,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ act='relu',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay), )
+ self.branch2 = nn.Sequential(
+ ConvNormLayer(
+ ch_in=branch_channel if stride == 1 else in_channel,
+ ch_out=branch_channel,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ act='relu',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay),
+ ConvNormLayer(
+ ch_in=branch_channel,
+ ch_out=branch_channel,
+ filter_size=3,
+ stride=self.stride,
+ groups=branch_channel,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay),
+ ConvNormLayer(
+ ch_in=branch_channel,
+ ch_out=branch_channel,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ act='relu',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay), )
+
+ def forward(self, x):
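+        # ShuffleNetV2-style unit: with stride > 1 both branches process the
+        # full input and downsample it; with stride 1 the channels are split
+        # and only one half is transformed. Outputs are concatenated and
+        # channel-shuffled.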
+ if self.stride > 1:
+ x1 = self.branch1(x)
+ x2 = self.branch2(x)
+ else:
+ x1, x2 = x.chunk(2, axis=1)
+ x2 = self.branch2(x2)
+ out = paddle.concat([x1, x2], axis=1)
+ out = channel_shuffle(out, groups=2)
+ return out
+
+
+class IterativeHead(nn.Layer):
+ def __init__(self,
+ in_channels,
+ norm_type='bn',
+ freeze_norm=False,
+ norm_decay=0.):
+ super(IterativeHead, self).__init__()
+ num_branches = len(in_channels)
+ self.in_channels = in_channels[::-1]
+
+ projects = []
+ for i in range(num_branches):
+ if i != num_branches - 1:
+ projects.append(
+ DepthWiseSeparableConvNormLayer(
+ ch_in=self.in_channels[i],
+ ch_out=self.in_channels[i + 1],
+ filter_size=3,
+ stride=1,
+ dw_act=None,
+ pw_act='relu',
+ dw_norm_type=norm_type,
+ pw_norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay))
+ else:
+ projects.append(
+ DepthWiseSeparableConvNormLayer(
+ ch_in=self.in_channels[i],
+ ch_out=self.in_channels[i],
+ filter_size=3,
+ stride=1,
+ dw_act=None,
+ pw_act='relu',
+ dw_norm_type=norm_type,
+ pw_norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay))
+ self.projects = nn.LayerList(projects)
+
+ def forward(self, x):
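+        # Iterate from the coarsest to the finest branch: upsample the
+        # previous (coarser) result, add it to the current branch, then
+        # project it; the list is restored to its original order at the end.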
+ x = x[::-1]
+ y = []
+ last_x = None
+ for i, s in enumerate(x):
+ if last_x is not None:
+ last_x = F.interpolate(
+ last_x,
+ size=s.shape[-2:],
+ mode='bilinear',
+ align_corners=True)
+ s = s + last_x
+ s = self.projects[i](s)
+ y.append(s)
+ last_x = s
+
+ return y[::-1]
+
+
+class Stem(nn.Layer):
+ def __init__(self,
+ in_channel,
+ stem_channel,
+ out_channel,
+ expand_ratio,
+ norm_type='bn',
+ freeze_norm=False,
+ norm_decay=0.):
+ super(Stem, self).__init__()
+ self.conv1 = ConvNormLayer(
+ in_channel,
+ stem_channel,
+ filter_size=3,
+ stride=2,
+ norm_type=norm_type,
+ act='relu',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+ mid_channel = int(round(stem_channel * expand_ratio))
+ branch_channel = stem_channel // 2
+ if stem_channel == out_channel:
+ inc_channel = out_channel - branch_channel
+ else:
+ inc_channel = out_channel - stem_channel
+ self.branch1 = nn.Sequential(
+ ConvNormLayer(
+ ch_in=branch_channel,
+ ch_out=branch_channel,
+ filter_size=3,
+ stride=2,
+ groups=branch_channel,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay),
+ ConvNormLayer(
+ ch_in=branch_channel,
+ ch_out=inc_channel,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ act='relu',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay), )
+ self.expand_conv = ConvNormLayer(
+ ch_in=branch_channel,
+ ch_out=mid_channel,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ act='relu',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+ self.depthwise_conv = ConvNormLayer(
+ ch_in=mid_channel,
+ ch_out=mid_channel,
+ filter_size=3,
+ stride=2,
+ groups=mid_channel,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+ self.linear_conv = ConvNormLayer(
+ ch_in=mid_channel,
+ ch_out=branch_channel
+ if stem_channel == out_channel else stem_channel,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ act='relu',
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+
+ def forward(self, x):
+ x = self.conv1(x)
+ x1, x2 = x.chunk(2, axis=1)
+ x1 = self.branch1(x1)
+ x2 = self.expand_conv(x2)
+ x2 = self.depthwise_conv(x2)
+ x2 = self.linear_conv(x2)
+ out = paddle.concat([x1, x2], axis=1)
+ out = channel_shuffle(out, groups=2)
+
+ return out
+
+
+class LiteHRNetModule(nn.Layer):
+ def __init__(self,
+ num_branches,
+ num_blocks,
+ in_channels,
+ reduce_ratio,
+ module_type,
+ multiscale_output=False,
+ with_fuse=True,
+ norm_type='bn',
+ freeze_norm=False,
+ norm_decay=0.):
+ super(LiteHRNetModule, self).__init__()
+        assert num_branches == len(in_channels),\
+            "num_branches {} should equal len(in_channels) {}".format(num_branches, len(in_channels))
+ assert module_type in ['LITE', 'NAIVE'],\
+ "module_type should be one of ['LITE', 'NAIVE']"
+ self.num_branches = num_branches
+ self.in_channels = in_channels
+ self.multiscale_output = multiscale_output
+ self.with_fuse = with_fuse
+ self.norm_type = 'bn'
+ self.module_type = module_type
+
+ if self.module_type == 'LITE':
+ self.layers = self._make_weighting_blocks(
+ num_blocks,
+ reduce_ratio,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+ elif self.module_type == 'NAIVE':
+ self.layers = self._make_naive_branches(
+ num_branches,
+ num_blocks,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay)
+
+ if self.with_fuse:
+ self.fuse_layers = self._make_fuse_layers(
+ freeze_norm=freeze_norm, norm_decay=norm_decay)
+ self.relu = nn.ReLU()
+
+ def _make_weighting_blocks(self,
+ num_blocks,
+ reduce_ratio,
+ stride=1,
+ freeze_norm=False,
+ norm_decay=0.):
+ layers = []
+ for i in range(num_blocks):
+ layers.append(
+ ConditionalChannelWeightingBlock(
+ self.in_channels,
+ stride=stride,
+ reduce_ratio=reduce_ratio,
+ norm_type=self.norm_type,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay))
+ return nn.Sequential(*layers)
+
+ def _make_naive_branches(self,
+ num_branches,
+ num_blocks,
+ freeze_norm=False,
+ norm_decay=0.):
+ branches = []
+ for branch_idx in range(num_branches):
+ layers = []
+ for i in range(num_blocks):
+ layers.append(
+ ShuffleUnit(
+ self.in_channels[branch_idx],
+ self.in_channels[branch_idx],
+ stride=1,
+ norm_type=self.norm_type,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay))
+ branches.append(nn.Sequential(*layers))
+ return nn.LayerList(branches)
+
+ def _make_fuse_layers(self, freeze_norm=False, norm_decay=0.):
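+        # Fuse branch j into output branch i: 1x1 conv + nearest upsample when
+        # j > i (coarser into finer), identity when j == i, and a chain of
+        # stride-2 depthwise + pointwise convs when j < i.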
+ if self.num_branches == 1:
+ return None
+ fuse_layers = []
+ num_out_branches = self.num_branches if self.multiscale_output else 1
+ for i in range(num_out_branches):
+ fuse_layer = []
+ for j in range(self.num_branches):
+ if j > i:
+ fuse_layer.append(
+ nn.Sequential(
+ L.Conv2d(
+ self.in_channels[j],
+ self.in_channels[i],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False, ),
+ nn.BatchNorm(self.in_channels[i]),
+ nn.Upsample(
+ scale_factor=2**(j - i), mode='nearest')))
+ elif j == i:
+ fuse_layer.append(None)
+ else:
+ conv_downsamples = []
+ for k in range(i - j):
+ if k == i - j - 1:
+ conv_downsamples.append(
+ nn.Sequential(
+ L.Conv2d(
+ self.in_channels[j],
+ self.in_channels[j],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ groups=self.in_channels[j],
+ bias=False, ),
+ nn.BatchNorm(self.in_channels[j]),
+ L.Conv2d(
+ self.in_channels[j],
+ self.in_channels[i],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False, ),
+ nn.BatchNorm(self.in_channels[i])))
+ else:
+ conv_downsamples.append(
+ nn.Sequential(
+ L.Conv2d(
+ self.in_channels[j],
+ self.in_channels[j],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ groups=self.in_channels[j],
+ bias=False, ),
+ nn.BatchNorm(self.in_channels[j]),
+ L.Conv2d(
+ self.in_channels[j],
+ self.in_channels[j],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False, ),
+ nn.BatchNorm(self.in_channels[j]),
+ nn.ReLU()))
+
+ fuse_layer.append(nn.Sequential(*conv_downsamples))
+ fuse_layers.append(nn.LayerList(fuse_layer))
+
+ return nn.LayerList(fuse_layers)
+
+ def forward(self, x):
+ if self.num_branches == 1:
+ return [self.layers[0](x[0])]
+ if self.module_type == 'LITE':
+ out = self.layers(x)
+ elif self.module_type == 'NAIVE':
+ for i in range(self.num_branches):
+ x[i] = self.layers[i](x[i])
+ out = x
+ if self.with_fuse:
+ out_fuse = []
+ for i in range(len(self.fuse_layers)):
+ y = out[0] if i == 0 else self.fuse_layers[i][0](out[0])
+ for j in range(self.num_branches):
+ if j == 0:
+ y += y
+ elif i == j:
+ y += out[j]
+ else:
+ y += self.fuse_layers[i][j](out[j])
+ if i == 0:
+ out[i] = y
+ out_fuse.append(self.relu(y))
+ out = out_fuse
+ elif not self.multiscale_output:
+ out = [out[0]]
+ return out
+
+
+@register
+class LiteHRNet(nn.Layer):
+ """
+ @inproceedings{Yulitehrnet21,
+ title={Lite-HRNet: A Lightweight High-Resolution Network},
+ author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong},
+ booktitle={CVPR},year={2021}
+ }
+ Args:
+ network_type (str): the network_type should be one of ["lite_18", "lite_30", "naive", "wider_naive"],
+ "naive": Simply combining the shuffle block in ShuffleNet and the highresolution design pattern in HRNet.
+ "wider_naive": Naive network with wider channels in each block.
+ "lite_18": Lite-HRNet-18, which replaces the pointwise convolution in a shuffle block by conditional channel weighting.
+ "lite_30": Lite-HRNet-30, with more blocks compared with Lite-HRNet-18.
+ freeze_at (int): the stage to freeze
+ freeze_norm (bool): whether to freeze norm in HRNet
+ norm_decay (float): weight decay for normalization layer weights
+        return_idx (List): the stages whose outputs are returned
+ """
+
+ def __init__(self,
+ network_type,
+ freeze_at=0,
+ freeze_norm=True,
+ norm_decay=0.,
+ return_idx=[0, 1, 2, 3]):
+ super(LiteHRNet, self).__init__()
+ if isinstance(return_idx, Integral):
+ return_idx = [return_idx]
+ assert network_type in ["lite_18", "lite_30", "naive", "wider_naive"],\
+ "the network_type should be one of [lite_18, lite_30, naive, wider_naive]"
+        assert len(return_idx) > 0, "need at least one return index"
+ self.freeze_at = freeze_at
+ self.freeze_norm = freeze_norm
+ self.norm_decay = norm_decay
+ self.return_idx = return_idx
+ self.norm_type = 'bn'
+
+ self.module_configs = {
+ "lite_18": {
+ "num_modules": [2, 4, 2],
+ "num_branches": [2, 3, 4],
+ "num_blocks": [2, 2, 2],
+ "module_type": ["LITE", "LITE", "LITE"],
+ "reduce_ratios": [8, 8, 8],
+ "num_channels": [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
+ },
+ "lite_30": {
+ "num_modules": [3, 8, 3],
+ "num_branches": [2, 3, 4],
+ "num_blocks": [2, 2, 2],
+ "module_type": ["LITE", "LITE", "LITE"],
+ "reduce_ratios": [8, 8, 8],
+ "num_channels": [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
+ },
+ "naive": {
+ "num_modules": [2, 4, 2],
+ "num_branches": [2, 3, 4],
+ "num_blocks": [2, 2, 2],
+ "module_type": ["NAIVE", "NAIVE", "NAIVE"],
+ "reduce_ratios": [1, 1, 1],
+ "num_channels": [[30, 60], [30, 60, 120], [30, 60, 120, 240]],
+ },
+ "wider_naive": {
+ "num_modules": [2, 4, 2],
+ "num_branches": [2, 3, 4],
+ "num_blocks": [2, 2, 2],
+ "module_type": ["NAIVE", "NAIVE", "NAIVE"],
+ "reduce_ratios": [1, 1, 1],
+ "num_channels": [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
+ },
+ }
+
+ self.stages_config = self.module_configs[network_type]
+
+ self.stem = Stem(3, 32, 32, 1)
+ num_channels_pre_layer = [32]
+ for stage_idx in range(3):
+ num_channels = self.stages_config["num_channels"][stage_idx]
+ setattr(self, 'transition{}'.format(stage_idx),
+ self._make_transition_layer(num_channels_pre_layer,
+ num_channels, self.freeze_norm,
+ self.norm_decay))
+ stage, num_channels_pre_layer = self._make_stage(
+ self.stages_config, stage_idx, num_channels, True,
+ self.freeze_norm, self.norm_decay)
+ setattr(self, 'stage{}'.format(stage_idx), stage)
+ self.head_layer = IterativeHead(num_channels_pre_layer, 'bn',
+ self.freeze_norm, self.norm_decay)
+
+ def _make_transition_layer(self,
+ num_channels_pre_layer,
+ num_channels_cur_layer,
+ freeze_norm=False,
+ norm_decay=0.):
+ num_branches_pre = len(num_channels_pre_layer)
+ num_branches_cur = len(num_channels_cur_layer)
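+        # Adapt the previous stage's branches to the new stage: where channel
+        # counts differ, remap them with a depthwise + pointwise conv pair;
+        # extra (new) branches are derived from the last, coarsest branch via
+        # stride-2 downsampling.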
+ transition_layers = []
+ for i in range(num_branches_cur):
+ if i < num_branches_pre:
+ if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
+ transition_layers.append(
+ nn.Sequential(
+ L.Conv2d(
+ num_channels_pre_layer[i],
+ num_channels_pre_layer[i],
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ groups=num_channels_pre_layer[i],
+ bias=False),
+ nn.BatchNorm(num_channels_pre_layer[i]),
+ L.Conv2d(
+ num_channels_pre_layer[i],
+ num_channels_cur_layer[i],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False, ),
+ nn.BatchNorm(num_channels_cur_layer[i]),
+ nn.ReLU()))
+ else:
+ transition_layers.append(None)
+ else:
+ conv_downsamples = []
+ for j in range(i + 1 - num_branches_pre):
+ conv_downsamples.append(
+ nn.Sequential(
+ L.Conv2d(
+ num_channels_pre_layer[-1],
+ num_channels_pre_layer[-1],
+ groups=num_channels_pre_layer[-1],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False, ),
+ nn.BatchNorm(num_channels_pre_layer[-1]),
+ L.Conv2d(
+ num_channels_pre_layer[-1],
+ num_channels_cur_layer[i]
+ if j == i - num_branches_pre else
+ num_channels_pre_layer[-1],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False, ),
+ nn.BatchNorm(num_channels_cur_layer[i]
+ if j == i - num_branches_pre else
+ num_channels_pre_layer[-1]),
+ nn.ReLU()))
+ transition_layers.append(nn.Sequential(*conv_downsamples))
+ return nn.LayerList(transition_layers)
+
+ def _make_stage(self,
+ stages_config,
+ stage_idx,
+ in_channels,
+ multiscale_output,
+ freeze_norm=False,
+ norm_decay=0.):
+ num_modules = stages_config["num_modules"][stage_idx]
+ num_branches = stages_config["num_branches"][stage_idx]
+ num_blocks = stages_config["num_blocks"][stage_idx]
+ reduce_ratio = stages_config['reduce_ratios'][stage_idx]
+ module_type = stages_config['module_type'][stage_idx]
+
+ modules = []
+ for i in range(num_modules):
+ if not multiscale_output and i == num_modules - 1:
+ reset_multiscale_output = False
+ else:
+ reset_multiscale_output = True
+ modules.append(
+ LiteHRNetModule(
+ num_branches,
+ num_blocks,
+ in_channels,
+ reduce_ratio,
+ module_type,
+ multiscale_output=reset_multiscale_output,
+ with_fuse=True,
+ freeze_norm=freeze_norm,
+ norm_decay=norm_decay))
+ in_channels = modules[-1].in_channels
+ return nn.Sequential(*modules), in_channels
+
+ def forward(self, inputs):
+ x = inputs['image']
+ x = self.stem(x)
+ y_list = [x]
+ for stage_idx in range(3):
+ x_list = []
+ transition = getattr(self, 'transition{}'.format(stage_idx))
+ for j in range(self.stages_config["num_branches"][stage_idx]):
+ if transition[j] is not None:
+ if j >= len(y_list):
+ x_list.append(transition[j](y_list[-1]))
+ else:
+ x_list.append(transition[j](y_list[j]))
+ else:
+ x_list.append(y_list[j])
+ y_list = getattr(self, 'stage{}'.format(stage_idx))(x_list)
+ x = self.head_layer(y_list)
+ res = []
+ for i, layer in enumerate(x):
+ if i == self.freeze_at:
+ layer.stop_gradient = True
+ if i in self.return_idx:
+ res.append(layer)
+ return res
+
+ @property
+ def out_shape(self):
+ return [
+ ShapeSpec(
+ channels=self._out_channels[i], stride=self._out_strides[i])
+ for i in self.return_idx
+ ]
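+
+
+# Minimal usage sketch (illustrative only; assumes ppdet is importable and the
+# detector convention of a dict input holding an 'image' tensor):
+#
+#     import paddle
+#     backbone = LiteHRNet(network_type='lite_18', return_idx=[0, 1, 2, 3])
+#     feats = backbone({'image': paddle.randn([1, 3, 512, 512])})
+#     # feats: one feature map per resolution branch (strides 4/8/16/32)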
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/mobilenet_v1.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/mobilenet_v1.py
new file mode 100644
index 000000000..8cf602832
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/mobilenet_v1.py
@@ -0,0 +1,409 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.regularizer import L2Decay
+from paddle.nn.initializer import KaimingNormal
+from ppdet.core.workspace import register, serializable
+from numbers import Integral
+from ..shape_spec import ShapeSpec
+
+__all__ = ['MobileNet']
+
+
+class ConvBNLayer(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ padding,
+ num_groups=1,
+ act='relu',
+ conv_lr=1.,
+ conv_decay=0.,
+ norm_decay=0.,
+ norm_type='bn',
+ name=None):
+ super(ConvBNLayer, self).__init__()
+ self.act = act
+ self._conv = nn.Conv2D(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ groups=num_groups,
+ weight_attr=ParamAttr(
+ learning_rate=conv_lr,
+ initializer=KaimingNormal(),
+ regularizer=L2Decay(conv_decay)),
+ bias_attr=False)
+
+ param_attr = ParamAttr(regularizer=L2Decay(norm_decay))
+ bias_attr = ParamAttr(regularizer=L2Decay(norm_decay))
+ if norm_type == 'sync_bn':
+ self._batch_norm = nn.SyncBatchNorm(
+ out_channels, weight_attr=param_attr, bias_attr=bias_attr)
+ else:
+ self._batch_norm = nn.BatchNorm(
+ out_channels,
+ act=None,
+ param_attr=param_attr,
+ bias_attr=bias_attr,
+ use_global_stats=False)
+
+ def forward(self, x):
+ x = self._conv(x)
+ x = self._batch_norm(x)
+ if self.act == "relu":
+ x = F.relu(x)
+ elif self.act == "relu6":
+ x = F.relu6(x)
+ return x
+
+
+class DepthwiseSeparable(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels1,
+ out_channels2,
+ num_groups,
+ stride,
+ scale,
+ conv_lr=1.,
+ conv_decay=0.,
+ norm_decay=0.,
+ norm_type='bn',
+ name=None):
+ super(DepthwiseSeparable, self).__init__()
+
+ self._depthwise_conv = ConvBNLayer(
+ in_channels,
+ int(out_channels1 * scale),
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ num_groups=int(num_groups * scale),
+ conv_lr=conv_lr,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name=name + "_dw")
+
+ self._pointwise_conv = ConvBNLayer(
+ int(out_channels1 * scale),
+ int(out_channels2 * scale),
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ conv_lr=conv_lr,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name=name + "_sep")
+
+ def forward(self, x):
+ x = self._depthwise_conv(x)
+ x = self._pointwise_conv(x)
+ return x
+
+
+class ExtraBlock(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels1,
+ out_channels2,
+ num_groups=1,
+ stride=2,
+ conv_lr=1.,
+ conv_decay=0.,
+ norm_decay=0.,
+ norm_type='bn',
+ name=None):
+ super(ExtraBlock, self).__init__()
+
+ self.pointwise_conv = ConvBNLayer(
+ in_channels,
+ int(out_channels1),
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ num_groups=int(num_groups),
+ act='relu6',
+ conv_lr=conv_lr,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name=name + "_extra1")
+
+ self.normal_conv = ConvBNLayer(
+ int(out_channels1),
+ int(out_channels2),
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ num_groups=int(num_groups),
+ act='relu6',
+ conv_lr=conv_lr,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name=name + "_extra2")
+
+ def forward(self, x):
+ x = self.pointwise_conv(x)
+ x = self.normal_conv(x)
+ return x
+
+
+@register
+@serializable
+class MobileNet(nn.Layer):
+ __shared__ = ['norm_type']
+
+ def __init__(self,
+ norm_type='bn',
+ norm_decay=0.,
+ conv_decay=0.,
+ scale=1,
+ conv_learning_rate=1.0,
+ feature_maps=[4, 6, 13],
+ with_extra_blocks=False,
+ extra_block_filters=[[256, 512], [128, 256], [128, 256],
+ [64, 128]]):
+ super(MobileNet, self).__init__()
+ if isinstance(feature_maps, Integral):
+ feature_maps = [feature_maps]
+ self.feature_maps = feature_maps
+ self.with_extra_blocks = with_extra_blocks
+ self.extra_block_filters = extra_block_filters
+
+ self._out_channels = []
+
+ self.conv1 = ConvBNLayer(
+ in_channels=3,
+ out_channels=int(32 * scale),
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv1")
+
+ self.dwsl = []
+ dws21 = self.add_sublayer(
+ "conv2_1",
+ sublayer=DepthwiseSeparable(
+ in_channels=int(32 * scale),
+ out_channels1=32,
+ out_channels2=64,
+ num_groups=32,
+ stride=1,
+ scale=scale,
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv2_1"))
+ self.dwsl.append(dws21)
+ self._update_out_channels(64, len(self.dwsl), feature_maps)
+ dws22 = self.add_sublayer(
+ "conv2_2",
+ sublayer=DepthwiseSeparable(
+ in_channels=int(64 * scale),
+ out_channels1=64,
+ out_channels2=128,
+ num_groups=64,
+ stride=2,
+ scale=scale,
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv2_2"))
+ self.dwsl.append(dws22)
+ self._update_out_channels(128, len(self.dwsl), feature_maps)
+ # 1/4
+ dws31 = self.add_sublayer(
+ "conv3_1",
+ sublayer=DepthwiseSeparable(
+ in_channels=int(128 * scale),
+ out_channels1=128,
+ out_channels2=128,
+ num_groups=128,
+ stride=1,
+ scale=scale,
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv3_1"))
+ self.dwsl.append(dws31)
+ self._update_out_channels(128, len(self.dwsl), feature_maps)
+ dws32 = self.add_sublayer(
+ "conv3_2",
+ sublayer=DepthwiseSeparable(
+ in_channels=int(128 * scale),
+ out_channels1=128,
+ out_channels2=256,
+ num_groups=128,
+ stride=2,
+ scale=scale,
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv3_2"))
+ self.dwsl.append(dws32)
+ self._update_out_channels(256, len(self.dwsl), feature_maps)
+ # 1/8
+ dws41 = self.add_sublayer(
+ "conv4_1",
+ sublayer=DepthwiseSeparable(
+ in_channels=int(256 * scale),
+ out_channels1=256,
+ out_channels2=256,
+ num_groups=256,
+ stride=1,
+ scale=scale,
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv4_1"))
+ self.dwsl.append(dws41)
+ self._update_out_channels(256, len(self.dwsl), feature_maps)
+ dws42 = self.add_sublayer(
+ "conv4_2",
+ sublayer=DepthwiseSeparable(
+ in_channels=int(256 * scale),
+ out_channels1=256,
+ out_channels2=512,
+ num_groups=256,
+ stride=2,
+ scale=scale,
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv4_2"))
+ self.dwsl.append(dws42)
+ self._update_out_channels(512, len(self.dwsl), feature_maps)
+ # 1/16
+ for i in range(5):
+ tmp = self.add_sublayer(
+ "conv5_" + str(i + 1),
+ sublayer=DepthwiseSeparable(
+                    in_channels=int(512 * scale),
+ out_channels1=512,
+ out_channels2=512,
+ num_groups=512,
+ stride=1,
+ scale=scale,
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv5_" + str(i + 1)))
+ self.dwsl.append(tmp)
+ self._update_out_channels(512, len(self.dwsl), feature_maps)
+ dws56 = self.add_sublayer(
+ "conv5_6",
+ sublayer=DepthwiseSeparable(
+ in_channels=int(512 * scale),
+ out_channels1=512,
+ out_channels2=1024,
+ num_groups=512,
+ stride=2,
+ scale=scale,
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv5_6"))
+ self.dwsl.append(dws56)
+ self._update_out_channels(1024, len(self.dwsl), feature_maps)
+ # 1/32
+ dws6 = self.add_sublayer(
+ "conv6",
+ sublayer=DepthwiseSeparable(
+ in_channels=int(1024 * scale),
+ out_channels1=1024,
+ out_channels2=1024,
+ num_groups=1024,
+ stride=1,
+ scale=scale,
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv6"))
+ self.dwsl.append(dws6)
+ self._update_out_channels(1024, len(self.dwsl), feature_maps)
+
+ if self.with_extra_blocks:
+ self.extra_blocks = []
+ for i, block_filter in enumerate(self.extra_block_filters):
+ in_c = 1024 if i == 0 else self.extra_block_filters[i - 1][1]
+ conv_extra = self.add_sublayer(
+ "conv7_" + str(i + 1),
+ sublayer=ExtraBlock(
+ in_c,
+ block_filter[0],
+ block_filter[1],
+ conv_lr=conv_learning_rate,
+ conv_decay=conv_decay,
+ norm_decay=norm_decay,
+ norm_type=norm_type,
+ name="conv7_" + str(i + 1)))
+ self.extra_blocks.append(conv_extra)
+ self._update_out_channels(
+ block_filter[1],
+ len(self.dwsl) + len(self.extra_blocks), feature_maps)
+
+ def _update_out_channels(self, channel, feature_idx, feature_maps):
+ if feature_idx in feature_maps:
+ self._out_channels.append(channel)
+
+ def forward(self, inputs):
+ outs = []
+ y = self.conv1(inputs['image'])
+ for i, block in enumerate(self.dwsl):
+ y = block(y)
+ if i + 1 in self.feature_maps:
+ outs.append(y)
+
+ if not self.with_extra_blocks:
+ return outs
+
+ y = outs[-1]
+ for i, block in enumerate(self.extra_blocks):
+ idx = i + len(self.dwsl)
+ y = block(y)
+ if idx + 1 in self.feature_maps:
+ outs.append(y)
+ return outs
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
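+
+
+# Minimal usage sketch (illustrative only; `import paddle` assumed):
+#
+#     backbone = MobileNet(scale=1.0, feature_maps=[4, 6, 13])
+#     outs = backbone({'image': paddle.randn([1, 3, 300, 300])})
+#     # outs: features after blocks 4, 6 and 13 (strides 8/16/32)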
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/mobilenet_v3.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/mobilenet_v3.py
new file mode 100644
index 000000000..02021e87c
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/mobilenet_v3.py
@@ -0,0 +1,482 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.regularizer import L2Decay
+from ppdet.core.workspace import register, serializable
+from numbers import Integral
+from ..shape_spec import ShapeSpec
+
+__all__ = ['MobileNetV3']
+
+
+def make_divisible(v, divisor=8, min_value=None):
+ if min_value is None:
+ min_value = divisor
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+ if new_v < 0.9 * v:
+ new_v += divisor
+ return new_v
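+    # e.g. make_divisible(30) -> 32 and make_divisible(17) -> 16; the 0.9 * v
+    # check guarantees rounding down never removes more than 10% of channels.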
+
+
+class ConvBNLayer(nn.Layer):
+ def __init__(self,
+ in_c,
+ out_c,
+ filter_size,
+ stride,
+ padding,
+ num_groups=1,
+ act=None,
+ lr_mult=1.,
+ conv_decay=0.,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ name=""):
+ super(ConvBNLayer, self).__init__()
+ self.act = act
+ self.conv = nn.Conv2D(
+ in_channels=in_c,
+ out_channels=out_c,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=padding,
+ groups=num_groups,
+ weight_attr=ParamAttr(
+ learning_rate=lr_mult, regularizer=L2Decay(conv_decay)),
+ bias_attr=False)
+
+ norm_lr = 0. if freeze_norm else lr_mult
+ param_attr = ParamAttr(
+ learning_rate=norm_lr,
+ regularizer=L2Decay(norm_decay),
+ trainable=False if freeze_norm else True)
+ bias_attr = ParamAttr(
+ learning_rate=norm_lr,
+ regularizer=L2Decay(norm_decay),
+ trainable=False if freeze_norm else True)
+ global_stats = True if freeze_norm else False
+ if norm_type == 'sync_bn':
+ self.bn = nn.SyncBatchNorm(
+ out_c, weight_attr=param_attr, bias_attr=bias_attr)
+ else:
+ self.bn = nn.BatchNorm(
+ out_c,
+ act=None,
+ param_attr=param_attr,
+ bias_attr=bias_attr,
+ use_global_stats=global_stats)
+ norm_params = self.bn.parameters()
+ if freeze_norm:
+ for param in norm_params:
+ param.stop_gradient = True
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.bn(x)
+ if self.act is not None:
+ if self.act == "relu":
+ x = F.relu(x)
+ elif self.act == "relu6":
+ x = F.relu6(x)
+ elif self.act == "hard_swish":
+ x = F.hardswish(x)
+ else:
+                raise NotImplementedError(
+                    "Unsupported activation: [{}]".format(self.act))
+ return x
+
+
+class ResidualUnit(nn.Layer):
+ def __init__(self,
+ in_c,
+ mid_c,
+ out_c,
+ filter_size,
+ stride,
+ use_se,
+ lr_mult,
+ conv_decay=0.,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ act=None,
+ return_list=False,
+ name=''):
+ super(ResidualUnit, self).__init__()
+ self.if_shortcut = stride == 1 and in_c == out_c
+ self.use_se = use_se
+ self.return_list = return_list
+
+ self.expand_conv = ConvBNLayer(
+ in_c=in_c,
+ out_c=mid_c,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ act=act,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_expand")
+ self.bottleneck_conv = ConvBNLayer(
+ in_c=mid_c,
+ out_c=mid_c,
+ filter_size=filter_size,
+ stride=stride,
+ padding=int((filter_size - 1) // 2),
+ num_groups=mid_c,
+ act=act,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_depthwise")
+ if self.use_se:
+ self.mid_se = SEModule(
+ mid_c, lr_mult, conv_decay, name=name + "_se")
+ self.linear_conv = ConvBNLayer(
+ in_c=mid_c,
+ out_c=out_c,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ act=None,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_linear")
+
+ def forward(self, inputs):
+ y = self.expand_conv(inputs)
+ x = self.bottleneck_conv(y)
+ if self.use_se:
+ x = self.mid_se(x)
+ x = self.linear_conv(x)
+ if self.if_shortcut:
+ x = paddle.add(inputs, x)
+ if self.return_list:
+ return [y, x]
+ else:
+ return x
+
+
+class SEModule(nn.Layer):
+ def __init__(self, channel, lr_mult, conv_decay, reduction=4, name=""):
+ super(SEModule, self).__init__()
+ self.avg_pool = nn.AdaptiveAvgPool2D(1)
+ mid_channels = int(channel // reduction)
+ self.conv1 = nn.Conv2D(
+ in_channels=channel,
+ out_channels=mid_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ weight_attr=ParamAttr(
+ learning_rate=lr_mult, regularizer=L2Decay(conv_decay)),
+ bias_attr=ParamAttr(
+ learning_rate=lr_mult, regularizer=L2Decay(conv_decay)))
+ self.conv2 = nn.Conv2D(
+ in_channels=mid_channels,
+ out_channels=channel,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ weight_attr=ParamAttr(
+ learning_rate=lr_mult, regularizer=L2Decay(conv_decay)),
+ bias_attr=ParamAttr(
+ learning_rate=lr_mult, regularizer=L2Decay(conv_decay)))
+
+ def forward(self, inputs):
+ outputs = self.avg_pool(inputs)
+ outputs = self.conv1(outputs)
+ outputs = F.relu(outputs)
+ outputs = self.conv2(outputs)
+ outputs = F.hardsigmoid(outputs, slope=0.2, offset=0.5)
+ return paddle.multiply(x=inputs, y=outputs)
+
+
+class ExtraBlockDW(nn.Layer):
+ def __init__(self,
+ in_c,
+ ch_1,
+ ch_2,
+ stride,
+ lr_mult,
+ conv_decay=0.,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ name=None):
+ super(ExtraBlockDW, self).__init__()
+ self.pointwise_conv = ConvBNLayer(
+ in_c=in_c,
+ out_c=ch_1,
+ filter_size=1,
+ stride=1,
+ padding='SAME',
+ act='relu6',
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_extra1")
+ self.depthwise_conv = ConvBNLayer(
+ in_c=ch_1,
+ out_c=ch_2,
+ filter_size=3,
+ stride=stride,
+ padding='SAME',
+ num_groups=int(ch_1),
+ act='relu6',
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_extra2_dw")
+ self.normal_conv = ConvBNLayer(
+ in_c=ch_2,
+ out_c=ch_2,
+ filter_size=1,
+ stride=1,
+ padding='SAME',
+ act='relu6',
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name=name + "_extra2_sep")
+
+ def forward(self, inputs):
+ x = self.pointwise_conv(inputs)
+ x = self.depthwise_conv(x)
+ x = self.normal_conv(x)
+ return x
+
+
+@register
+@serializable
+class MobileNetV3(nn.Layer):
+ __shared__ = ['norm_type']
+
+ def __init__(
+ self,
+ scale=1.0,
+ model_name="large",
+ feature_maps=[6, 12, 15],
+ with_extra_blocks=False,
+ extra_block_filters=[[256, 512], [128, 256], [128, 256], [64, 128]],
+ lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0],
+ conv_decay=0.0,
+ multiplier=1.0,
+ norm_type='bn',
+ norm_decay=0.0,
+ freeze_norm=False):
+ super(MobileNetV3, self).__init__()
+ if isinstance(feature_maps, Integral):
+ feature_maps = [feature_maps]
+ if norm_type == 'sync_bn' and freeze_norm:
+ raise ValueError(
+ "The norm_type should not be sync_bn when freeze_norm is True")
+ self.feature_maps = feature_maps
+ self.with_extra_blocks = with_extra_blocks
+ self.extra_block_filters = extra_block_filters
+
+ inplanes = 16
+ if model_name == "large":
+ self.cfg = [
+ # k, exp, c, se, nl, s,
+ [3, 16, 16, False, "relu", 1],
+ [3, 64, 24, False, "relu", 2],
+ [3, 72, 24, False, "relu", 1],
+ [5, 72, 40, True, "relu", 2], # RCNN output
+ [5, 120, 40, True, "relu", 1],
+ [5, 120, 40, True, "relu", 1], # YOLOv3 output
+ [3, 240, 80, False, "hard_swish", 2], # RCNN output
+ [3, 200, 80, False, "hard_swish", 1],
+ [3, 184, 80, False, "hard_swish", 1],
+ [3, 184, 80, False, "hard_swish", 1],
+ [3, 480, 112, True, "hard_swish", 1],
+ [3, 672, 112, True, "hard_swish", 1], # YOLOv3 output
+ [5, 672, 160, True, "hard_swish", 2], # SSD/SSDLite/RCNN output
+ [5, 960, 160, True, "hard_swish", 1],
+ [5, 960, 160, True, "hard_swish", 1], # YOLOv3 output
+ ]
+ elif model_name == "small":
+ self.cfg = [
+ # k, exp, c, se, nl, s,
+ [3, 16, 16, True, "relu", 2],
+ [3, 72, 24, False, "relu", 2], # RCNN output
+ [3, 88, 24, False, "relu", 1], # YOLOv3 output
+ [5, 96, 40, True, "hard_swish", 2], # RCNN output
+ [5, 240, 40, True, "hard_swish", 1],
+ [5, 240, 40, True, "hard_swish", 1],
+ [5, 120, 48, True, "hard_swish", 1],
+ [5, 144, 48, True, "hard_swish", 1], # YOLOv3 output
+ [5, 288, 96, True, "hard_swish", 2], # SSD/SSDLite/RCNN output
+ [5, 576, 96, True, "hard_swish", 1],
+ [5, 576, 96, True, "hard_swish", 1], # YOLOv3 output
+ ]
+ else:
+ raise NotImplementedError(
+ "mode[{}_model] is not implemented!".format(model_name))
+
+ if multiplier != 1.0:
+ self.cfg[-3][2] = int(self.cfg[-3][2] * multiplier)
+ self.cfg[-2][1] = int(self.cfg[-2][1] * multiplier)
+ self.cfg[-2][2] = int(self.cfg[-2][2] * multiplier)
+ self.cfg[-1][1] = int(self.cfg[-1][1] * multiplier)
+ self.cfg[-1][2] = int(self.cfg[-1][2] * multiplier)
+
+ self.conv1 = ConvBNLayer(
+ in_c=3,
+ out_c=make_divisible(inplanes * scale),
+ filter_size=3,
+ stride=2,
+ padding=1,
+ num_groups=1,
+ act="hard_swish",
+ lr_mult=lr_mult_list[0],
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name="conv1")
+
+ self._out_channels = []
+ self.block_list = []
+ i = 0
+ inplanes = make_divisible(inplanes * scale)
+ for (k, exp, c, se, nl, s) in self.cfg:
+ lr_idx = min(i // 3, len(lr_mult_list) - 1)
+ lr_mult = lr_mult_list[lr_idx]
+
+ # for SSD/SSDLite, first head input is after ResidualUnit expand_conv
+ return_list = self.with_extra_blocks and i + 2 in self.feature_maps
+
+ block = self.add_sublayer(
+ "conv" + str(i + 2),
+ sublayer=ResidualUnit(
+ in_c=inplanes,
+ mid_c=make_divisible(scale * exp),
+ out_c=make_divisible(scale * c),
+ filter_size=k,
+ stride=s,
+ use_se=se,
+ act=nl,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ return_list=return_list,
+ name="conv" + str(i + 2)))
+ self.block_list.append(block)
+ inplanes = make_divisible(scale * c)
+ i += 1
+ self._update_out_channels(
+ make_divisible(scale * exp)
+ if return_list else inplanes, i + 1, feature_maps)
+
+ if self.with_extra_blocks:
+ self.extra_block_list = []
+ extra_out_c = make_divisible(scale * self.cfg[-1][1])
+ lr_idx = min(i // 3, len(lr_mult_list) - 1)
+ lr_mult = lr_mult_list[lr_idx]
+
+ conv_extra = self.add_sublayer(
+ "conv" + str(i + 2),
+ sublayer=ConvBNLayer(
+ in_c=inplanes,
+ out_c=extra_out_c,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ num_groups=1,
+ act="hard_swish",
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name="conv" + str(i + 2)))
+ self.extra_block_list.append(conv_extra)
+ i += 1
+ self._update_out_channels(extra_out_c, i + 1, feature_maps)
+
+ for j, block_filter in enumerate(self.extra_block_filters):
+                in_c = extra_out_c if j == 0 else self.extra_block_filters[j - 1][1]
+ conv_extra = self.add_sublayer(
+ "conv" + str(i + 2),
+ sublayer=ExtraBlockDW(
+ in_c,
+ block_filter[0],
+ block_filter[1],
+ stride=2,
+ lr_mult=lr_mult,
+ conv_decay=conv_decay,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ name='conv' + str(i + 2)))
+ self.extra_block_list.append(conv_extra)
+ i += 1
+ self._update_out_channels(block_filter[1], i + 1, feature_maps)
+
+ def _update_out_channels(self, channel, feature_idx, feature_maps):
+ if feature_idx in feature_maps:
+ self._out_channels.append(channel)
+
+ def forward(self, inputs):
+ x = self.conv1(inputs['image'])
+ outs = []
+ for idx, block in enumerate(self.block_list):
+ x = block(x)
+ if idx + 2 in self.feature_maps:
+ if isinstance(x, list):
+ outs.append(x[0])
+ x = x[1]
+ else:
+ outs.append(x)
+
+ if not self.with_extra_blocks:
+ return outs
+
+ for i, block in enumerate(self.extra_block_list):
+ idx = i + len(self.block_list)
+ x = block(x)
+ if idx + 2 in self.feature_maps:
+ outs.append(x)
+ return outs
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
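+
+
+# Minimal usage sketch (illustrative only; feature_maps indices refer to the
+# convN layer names, conv1 being the stem):
+#
+#     backbone = MobileNetV3(model_name='large', feature_maps=[6, 12, 15])
+#     outs = backbone({'image': paddle.randn([1, 3, 320, 320])})
+#     # outs: conv6/conv12/conv15 features (strides 8/16/32)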
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/name_adapter.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/name_adapter.py
new file mode 100644
index 000000000..4afbb9b18
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/name_adapter.py
@@ -0,0 +1,69 @@
+class NameAdapter(object):
+ """Fix the backbones variable names for pretrained weight"""
+
+ def __init__(self, model):
+ super(NameAdapter, self).__init__()
+ self.model = model
+
+ @property
+ def model_type(self):
+ return getattr(self.model, '_model_type', '')
+
+ @property
+ def variant(self):
+ return getattr(self.model, 'variant', '')
+
+ def fix_conv_norm_name(self, name):
+ if name == "conv1":
+ bn_name = "bn_" + name
+ else:
+ bn_name = "bn" + name[3:]
+ # the naming rule is same as pretrained weight
+ if self.model_type == 'SEResNeXt':
+ bn_name = name + "_bn"
+ return bn_name
+
+ def fix_shortcut_name(self, name):
+ if self.model_type == 'SEResNeXt':
+ name = 'conv' + name + '_prj'
+ return name
+
+ def fix_bottleneck_name(self, name):
+ if self.model_type == 'SEResNeXt':
+ conv_name1 = 'conv' + name + '_x1'
+ conv_name2 = 'conv' + name + '_x2'
+ conv_name3 = 'conv' + name + '_x3'
+ shortcut_name = name
+ else:
+ conv_name1 = name + "_branch2a"
+ conv_name2 = name + "_branch2b"
+ conv_name3 = name + "_branch2c"
+ shortcut_name = name + "_branch1"
+ return conv_name1, conv_name2, conv_name3, shortcut_name
+
+ def fix_basicblock_name(self, name):
+ if self.model_type == 'SEResNeXt':
+ conv_name1 = 'conv' + name + '_x1'
+ conv_name2 = 'conv' + name + '_x2'
+ shortcut_name = name
+ else:
+ conv_name1 = name + "_branch2a"
+ conv_name2 = name + "_branch2b"
+ shortcut_name = name + "_branch1"
+ return conv_name1, conv_name2, shortcut_name
+
+ def fix_layer_warp_name(self, stage_num, count, i):
+ name = 'res' + str(stage_num)
+ if count > 10 and stage_num == 4:
+ if i == 0:
+ conv_name = name + "a"
+ else:
+ conv_name = name + "b" + str(i)
+ else:
+ conv_name = name + chr(ord("a") + i)
+ if self.model_type == 'SEResNeXt':
+ conv_name = str(stage_num + 2) + '_' + str(i + 1)
+ return conv_name
+
+ def fix_c1_stage_name(self):
+ return "res_conv1" if self.model_type == 'ResNeXt' else "conv1"
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/res2net.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/res2net.py
new file mode 100644
index 000000000..9e7677247
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/res2net.py
@@ -0,0 +1,357 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from numbers import Integral
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register, serializable
+from ..shape_spec import ShapeSpec
+from .resnet import ConvNormLayer
+
+__all__ = ['Res2Net', 'Res2NetC5']
+
+Res2Net_cfg = {
+ 50: [3, 4, 6, 3],
+ 101: [3, 4, 23, 3],
+ 152: [3, 8, 36, 3],
+ 200: [3, 12, 48, 3]
+}
+
+
+class BottleNeck(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ stride,
+ shortcut,
+ width,
+ scales=4,
+ variant='b',
+ groups=1,
+ lr=1.0,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=True,
+ dcn_v2=False):
+ super(BottleNeck, self).__init__()
+
+ self.shortcut = shortcut
+ self.scales = scales
+ self.stride = stride
+ if not shortcut:
+ if variant == 'd' and stride == 2:
+ self.branch1 = nn.Sequential()
+ self.branch1.add_sublayer(
+ 'pool',
+ nn.AvgPool2D(
+ kernel_size=2, stride=2, padding=0, ceil_mode=True))
+ self.branch1.add_sublayer(
+ 'conv',
+ ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr))
+ else:
+ self.branch1 = ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ filter_size=1,
+ stride=stride,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr)
+
+ self.branch2a = ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=width * scales,
+ filter_size=1,
+ stride=stride if variant == 'a' else 1,
+ groups=1,
+ act='relu',
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr)
+
+ self.branch2b = nn.LayerList([
+ ConvNormLayer(
+ ch_in=width,
+ ch_out=width,
+ filter_size=3,
+ stride=1 if variant == 'a' else stride,
+ groups=groups,
+ act='relu',
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr,
+ dcn_v2=dcn_v2) for _ in range(self.scales - 1)
+ ])
+
+ self.branch2c = ConvNormLayer(
+ ch_in=width * scales,
+ ch_out=ch_out,
+ filter_size=1,
+ stride=1,
+ groups=1,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr)
+
+ def forward(self, inputs):
+
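+        # Res2Net core: split the 1x1 output into `scales` groups; each 3x3
+        # branch processes its own group plus the previous branch's output,
+        # so later branches see progressively larger receptive fields (except
+        # in stride-2 blocks, where each branch runs on its own split).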
+ out = self.branch2a(inputs)
+ feature_split = paddle.split(out, self.scales, 1)
+ out_split = []
+ for i in range(self.scales - 1):
+ if i == 0 or self.stride == 2:
+ out_split.append(self.branch2b[i](feature_split[i]))
+ else:
+ out_split.append(self.branch2b[i](paddle.add(feature_split[i],
+ out_split[-1])))
+ if self.stride == 1:
+ out_split.append(feature_split[-1])
+ else:
+ out_split.append(F.avg_pool2d(feature_split[-1], 3, self.stride, 1))
+ out = self.branch2c(paddle.concat(out_split, 1))
+
+ if self.shortcut:
+ short = inputs
+ else:
+ short = self.branch1(inputs)
+
+ out = paddle.add(out, short)
+ out = F.relu(out)
+
+ return out
+
+
+class Blocks(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ count,
+ stage_num,
+ width,
+ scales=4,
+ variant='b',
+ groups=1,
+ lr=1.0,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=True,
+ dcn_v2=False):
+ super(Blocks, self).__init__()
+
+ self.blocks = nn.Sequential()
+ for i in range(count):
+ self.blocks.add_sublayer(
+ str(i),
+ BottleNeck(
+ ch_in=ch_in if i == 0 else ch_out,
+ ch_out=ch_out,
+ stride=2 if i == 0 and stage_num != 2 else 1,
+ shortcut=False if i == 0 else True,
+ width=width * (2**(stage_num - 2)),
+ scales=scales,
+ variant=variant,
+ groups=groups,
+ lr=lr,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ dcn_v2=dcn_v2))
+
+ def forward(self, inputs):
+ return self.blocks(inputs)
+
+
+@register
+@serializable
+class Res2Net(nn.Layer):
+ """
+ Res2Net, see https://arxiv.org/abs/1904.01169
+ Args:
+ depth (int): Res2Net depth, should be 50, 101, 152, 200.
+        width (int): base width of each scale branch in a bottleneck
+        scales (int): number of scale groups each bottleneck is split into
+ variant (str): Res2Net variant, supports 'a', 'b', 'c', 'd' currently
+        lr_mult_list (list): learning rate ratios of the four Res2Net stages (2, 3, 4, 5);
+                             lower ratios are needed for pretrained models obtained
+                             by distillation (default: [1.0, 1.0, 1.0, 1.0]).
+ groups (int): The groups number of the Conv Layer.
+ norm_type (str): normalization type, 'bn' or 'sync_bn'
+ norm_decay (float): weight decay for normalization layer weights
+ freeze_norm (bool): freeze normalization layers
+ freeze_at (int): freeze the backbone at which stage
+ return_idx (list): index of stages whose feature maps are returned,
+ index 0 stands for res2
+        dcn_v2_stages (list): indices of the stages that use deformable conv v2
+ num_stages (int): number of stages created
+
+ """
+ __shared__ = ['norm_type']
+
+ def __init__(self,
+ depth=50,
+ width=26,
+ scales=4,
+ variant='b',
+ lr_mult_list=[1.0, 1.0, 1.0, 1.0],
+ groups=1,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=True,
+ freeze_at=0,
+ return_idx=[0, 1, 2, 3],
+ dcn_v2_stages=[-1],
+ num_stages=4):
+ super(Res2Net, self).__init__()
+
+ self._model_type = 'Res2Net' if groups == 1 else 'Res2NeXt'
+
+        assert depth in [50, 101, 152, 200], \
+            "depth {} not in [50, 101, 152, 200]".format(depth)
+ assert variant in ['a', 'b', 'c', 'd'], "invalid Res2Net variant"
+ assert num_stages >= 1 and num_stages <= 4
+
+ self.depth = depth
+ self.variant = variant
+ self.norm_type = norm_type
+ self.norm_decay = norm_decay
+ self.freeze_norm = freeze_norm
+ self.freeze_at = freeze_at
+ if isinstance(return_idx, Integral):
+ return_idx = [return_idx]
+ assert max(return_idx) < num_stages, \
+            'the maximum return index must be smaller than num_stages, ' \
+ 'but received maximum return index is {} and num_stages ' \
+ 'is {}'.format(max(return_idx), num_stages)
+ self.return_idx = return_idx
+ self.num_stages = num_stages
+ assert len(lr_mult_list) == 4, \
+ "lr_mult_list length must be 4 but got {}".format(len(lr_mult_list))
+ if isinstance(dcn_v2_stages, Integral):
+ dcn_v2_stages = [dcn_v2_stages]
+ assert max(dcn_v2_stages) < num_stages
+ self.dcn_v2_stages = dcn_v2_stages
+
+ block_nums = Res2Net_cfg[depth]
+
+ # C1 stage
+ if self.variant in ['c', 'd']:
+ conv_def = [
+ [3, 32, 3, 2, "conv1_1"],
+ [32, 32, 3, 1, "conv1_2"],
+ [32, 64, 3, 1, "conv1_3"],
+ ]
+ else:
+ conv_def = [[3, 64, 7, 2, "conv1"]]
+ self.res1 = nn.Sequential()
+ for (c_in, c_out, k, s, _name) in conv_def:
+ self.res1.add_sublayer(
+ _name,
+ ConvNormLayer(
+ ch_in=c_in,
+ ch_out=c_out,
+ filter_size=k,
+ stride=s,
+ groups=1,
+ act='relu',
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=1.0))
+
+ self._in_channels = [64, 256, 512, 1024]
+ self._out_channels = [256, 512, 1024, 2048]
+ self._out_strides = [4, 8, 16, 32]
+
+ # C2-C5 stages
+ self.res_layers = []
+ for i in range(num_stages):
+ lr_mult = lr_mult_list[i]
+ stage_num = i + 2
+ self.res_layers.append(
+ self.add_sublayer(
+ "res{}".format(stage_num),
+ Blocks(
+ self._in_channels[i],
+ self._out_channels[i],
+ count=block_nums[i],
+ stage_num=stage_num,
+ width=width,
+ scales=scales,
+ groups=groups,
+ lr=lr_mult,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ dcn_v2=(i in self.dcn_v2_stages))))
+
+ @property
+ def out_shape(self):
+ return [
+ ShapeSpec(
+ channels=self._out_channels[i], stride=self._out_strides[i])
+ for i in self.return_idx
+ ]
+
+ def forward(self, inputs):
+ x = inputs['image']
+ res1 = self.res1(x)
+ x = F.max_pool2d(res1, kernel_size=3, stride=2, padding=1)
+ outs = []
+ for idx, stage in enumerate(self.res_layers):
+ x = stage(x)
+ if idx == self.freeze_at:
+ x.stop_gradient = True
+ if idx in self.return_idx:
+ outs.append(x)
+ return outs
+
+
+@register
+class Res2NetC5(nn.Layer):
+ def __init__(self, depth=50, width=26, scales=4, variant='b'):
+ super(Res2NetC5, self).__init__()
+ feat_in, feat_out = [1024, 2048]
+ self.res5 = Blocks(
+ feat_in,
+ feat_out,
+ count=3,
+ stage_num=5,
+ width=width,
+ scales=scales,
+ variant=variant)
+ self.feat_out = feat_out
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(
+ channels=self.feat_out,
+ stride=32, )]
+
+ def forward(self, roi_feat, stage=0):
+ y = self.res5(roi_feat)
+ return y
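+
+
+# Minimal usage sketch (illustrative only; `import paddle` assumed):
+#
+#     backbone = Res2Net(depth=50, width=26, scales=4, return_idx=[0, 1, 2, 3])
+#     outs = backbone({'image': paddle.randn([1, 3, 640, 640])})
+#     # outs: res2-res5 features with 256/512/1024/2048 channels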
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/resnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/resnet.py
new file mode 100644
index 000000000..d4bc878ea
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/resnet.py
@@ -0,0 +1,613 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from numbers import Integral
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register, serializable
+from paddle.regularizer import L2Decay
+from paddle.nn.initializer import Uniform
+from paddle import ParamAttr
+from paddle.nn.initializer import Constant
+from paddle.vision.ops import DeformConv2D
+from .name_adapter import NameAdapter
+from ..shape_spec import ShapeSpec
+
+__all__ = ['ResNet', 'Res5Head', 'Blocks', 'BasicBlock', 'BottleNeck']
+
+ResNet_cfg = {
+ 18: [2, 2, 2, 2],
+ 34: [3, 4, 6, 3],
+ 50: [3, 4, 6, 3],
+ 101: [3, 4, 23, 3],
+ 152: [3, 8, 36, 3],
+}
+
+
+class ConvNormLayer(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ filter_size,
+ stride,
+ groups=1,
+ act=None,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=True,
+ lr=1.0,
+ dcn_v2=False):
+ super(ConvNormLayer, self).__init__()
+ assert norm_type in ['bn', 'sync_bn']
+ self.norm_type = norm_type
+ self.act = act
+ self.dcn_v2 = dcn_v2
+
+ if not self.dcn_v2:
+ self.conv = nn.Conv2D(
+ in_channels=ch_in,
+ out_channels=ch_out,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(learning_rate=lr),
+ bias_attr=False)
+ else:
+ self.offset_channel = 2 * filter_size**2
+ self.mask_channel = filter_size**2
+
+ self.conv_offset = nn.Conv2D(
+ in_channels=ch_in,
+ out_channels=3 * filter_size**2,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ weight_attr=ParamAttr(initializer=Constant(0.)),
+ bias_attr=ParamAttr(initializer=Constant(0.)))
+ self.conv = DeformConv2D(
+ in_channels=ch_in,
+ out_channels=ch_out,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ dilation=1,
+ groups=groups,
+ weight_attr=ParamAttr(learning_rate=lr),
+ bias_attr=False)
+
+ norm_lr = 0. if freeze_norm else lr
+ param_attr = ParamAttr(
+ learning_rate=norm_lr,
+ regularizer=L2Decay(norm_decay),
+ trainable=False if freeze_norm else True)
+ bias_attr = ParamAttr(
+ learning_rate=norm_lr,
+ regularizer=L2Decay(norm_decay),
+ trainable=False if freeze_norm else True)
+
+ global_stats = True if freeze_norm else False
+ if norm_type == 'sync_bn':
+ self.norm = nn.SyncBatchNorm(
+ ch_out, weight_attr=param_attr, bias_attr=bias_attr)
+ else:
+ self.norm = nn.BatchNorm(
+ ch_out,
+ act=None,
+ param_attr=param_attr,
+ bias_attr=bias_attr,
+ use_global_stats=global_stats)
+ norm_params = self.norm.parameters()
+
+ if freeze_norm:
+ for param in norm_params:
+ param.stop_gradient = True
+
+ def forward(self, inputs):
+ if not self.dcn_v2:
+ out = self.conv(inputs)
+ else:
+ offset_mask = self.conv_offset(inputs)
+ offset, mask = paddle.split(
+ offset_mask,
+ num_or_sections=[self.offset_channel, self.mask_channel],
+ axis=1)
+ mask = F.sigmoid(mask)
+ out = self.conv(inputs, offset, mask=mask)
+
+ if self.norm_type in ['bn', 'sync_bn']:
+ out = self.norm(out)
+ if self.act:
+ out = getattr(F, self.act)(out)
+ return out
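As a quick sanity check of the DCNv2 bookkeeping above, here is a minimal standalone sketch (plain Paddle, arbitrary shapes) that splits a 3x3-kernel offset tensor exactly the way `ConvNormLayer.forward` does:

```python
import paddle
import paddle.nn.functional as F

# For a k x k kernel, conv_offset emits 3*k*k channels, split into
# 2*k*k x/y offsets and k*k modulation-mask channels (squashed by sigmoid).
k = 3
offset_mask = paddle.randn([2, 3 * k * k, 8, 8])   # stand-in for conv_offset output
offset, mask = paddle.split(offset_mask, [2 * k * k, k * k], axis=1)
mask = F.sigmoid(mask)                             # modulation values in (0, 1)
print(offset.shape, mask.shape)                    # [2, 18, 8, 8] [2, 9, 8, 8]
```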
+
+
+class SELayer(nn.Layer):
+ def __init__(self, ch, reduction_ratio=16):
+ super(SELayer, self).__init__()
+ self.pool = nn.AdaptiveAvgPool2D(1)
+ stdv = 1.0 / math.sqrt(ch)
+ c_ = ch // reduction_ratio
+ self.squeeze = nn.Linear(
+ ch,
+ c_,
+ weight_attr=paddle.ParamAttr(initializer=Uniform(-stdv, stdv)),
+ bias_attr=True)
+
+ stdv = 1.0 / math.sqrt(c_)
+ self.extract = nn.Linear(
+ c_,
+ ch,
+ weight_attr=paddle.ParamAttr(initializer=Uniform(-stdv, stdv)),
+ bias_attr=True)
+
+ def forward(self, inputs):
+ out = self.pool(inputs)
+ out = paddle.squeeze(out, axis=[2, 3])
+ out = self.squeeze(out)
+ out = F.relu(out)
+ out = self.extract(out)
+ out = F.sigmoid(out)
+ out = paddle.unsqueeze(out, axis=[2, 3])
+ scale = out * inputs
+ return scale
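The same squeeze-excite flow, traced shape by shape in a standalone sketch (hypothetical sizes, reduction_ratio=16):

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 64, 14, 14])
w = paddle.nn.AdaptiveAvgPool2D(1)(x)       # squeeze: [2, 64, 1, 1]
w = paddle.squeeze(w, axis=[2, 3])          # [2, 64]
w = F.relu(paddle.nn.Linear(64, 4)(w))      # 64 // 16 = 4 hidden units
w = F.sigmoid(paddle.nn.Linear(4, 64)(w))   # excite back to the channel dim
w = paddle.unsqueeze(w, axis=[2, 3])        # [2, 64, 1, 1]
print((x * w).shape)                        # per-channel rescaling: [2, 64, 14, 14]
```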
+
+
+class BasicBlock(nn.Layer):
+
+ expansion = 1
+
+ def __init__(self,
+ ch_in,
+ ch_out,
+ stride,
+ shortcut,
+ variant='b',
+ groups=1,
+ base_width=64,
+ lr=1.0,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=True,
+ dcn_v2=False,
+ std_senet=False):
+ super(BasicBlock, self).__init__()
+ assert groups == 1 and base_width == 64, 'BasicBlock only supports groups=1 and base_width=64'
+
+ self.shortcut = shortcut
+ if not shortcut:
+ if variant == 'd' and stride == 2:
+ self.short = nn.Sequential()
+ self.short.add_sublayer(
+ 'pool',
+ nn.AvgPool2D(
+ kernel_size=2, stride=2, padding=0, ceil_mode=True))
+ self.short.add_sublayer(
+ 'conv',
+ ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr))
+ else:
+ self.short = ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ filter_size=1,
+ stride=stride,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr)
+
+ self.branch2a = ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ filter_size=3,
+ stride=stride,
+ act='relu',
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr)
+
+ self.branch2b = ConvNormLayer(
+ ch_in=ch_out,
+ ch_out=ch_out,
+ filter_size=3,
+ stride=1,
+ act=None,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr,
+ dcn_v2=dcn_v2)
+
+ self.std_senet = std_senet
+ if self.std_senet:
+ self.se = SELayer(ch_out)
+
+ def forward(self, inputs):
+ out = self.branch2a(inputs)
+ out = self.branch2b(out)
+ if self.std_senet:
+ out = self.se(out)
+
+ if self.shortcut:
+ short = inputs
+ else:
+ short = self.short(inputs)
+
+ out = paddle.add(x=out, y=short)
+ out = F.relu(out)
+
+ return out
+
+
+class BottleNeck(nn.Layer):
+
+ expansion = 4
+
+ def __init__(self,
+ ch_in,
+ ch_out,
+ stride,
+ shortcut,
+ variant='b',
+ groups=1,
+ base_width=4,
+ lr=1.0,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=True,
+ dcn_v2=False,
+ std_senet=False):
+ super(BottleNeck, self).__init__()
+ if variant == 'a':
+ stride1, stride2 = stride, 1
+ else:
+ stride1, stride2 = 1, stride
+
+ # ResNeXt
+ width = int(ch_out * (base_width / 64.)) * groups
+
+ self.shortcut = shortcut
+ if not shortcut:
+ if variant == 'd' and stride == 2:
+ self.short = nn.Sequential()
+ self.short.add_sublayer(
+ 'pool',
+ nn.AvgPool2D(
+ kernel_size=2, stride=2, padding=0, ceil_mode=True))
+ self.short.add_sublayer(
+ 'conv',
+ ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=ch_out * self.expansion,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr))
+ else:
+ self.short = ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=ch_out * self.expansion,
+ filter_size=1,
+ stride=stride,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr)
+
+ self.branch2a = ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=width,
+ filter_size=1,
+ stride=stride1,
+ groups=1,
+ act='relu',
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr)
+
+ self.branch2b = ConvNormLayer(
+ ch_in=width,
+ ch_out=width,
+ filter_size=3,
+ stride=stride2,
+ groups=groups,
+ act='relu',
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr,
+ dcn_v2=dcn_v2)
+
+ self.branch2c = ConvNormLayer(
+ ch_in=width,
+ ch_out=ch_out * self.expansion,
+ filter_size=1,
+ stride=1,
+ groups=1,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=lr)
+
+ self.std_senet = std_senet
+ if self.std_senet:
+ self.se = SELayer(ch_out * self.expansion)
+
+ def forward(self, inputs):
+
+ out = self.branch2a(inputs)
+ out = self.branch2b(out)
+ out = self.branch2c(out)
+
+ if self.std_senet:
+ out = self.se(out)
+
+ if self.shortcut:
+ short = inputs
+ else:
+ short = self.short(inputs)
+
+ out = paddle.add(x=out, y=short)
+ out = F.relu(out)
+
+ return out
+
+
+class Blocks(nn.Layer):
+ def __init__(self,
+ block,
+ ch_in,
+ ch_out,
+ count,
+ name_adapter,
+ stage_num,
+ variant='b',
+ groups=1,
+ base_width=64,
+ lr=1.0,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=True,
+ dcn_v2=False,
+ std_senet=False):
+ super(Blocks, self).__init__()
+
+ self.blocks = []
+ for i in range(count):
+ conv_name = name_adapter.fix_layer_warp_name(stage_num, count, i)
+ layer = self.add_sublayer(
+ conv_name,
+ block(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ stride=2 if i == 0 and stage_num != 2 else 1,
+ shortcut=False if i == 0 else True,
+ variant=variant,
+ groups=groups,
+ base_width=base_width,
+ lr=lr,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ dcn_v2=dcn_v2,
+ std_senet=std_senet))
+ self.blocks.append(layer)
+ if i == 0:
+ ch_in = ch_out * block.expansion
+
+ def forward(self, inputs):
+ block_out = inputs
+ for block in self.blocks:
+ block_out = block(block_out)
+ return block_out
+
+
+@register
+@serializable
+class ResNet(nn.Layer):
+ __shared__ = ['norm_type']
+
+ def __init__(self,
+ depth=50,
+ ch_in=64,
+ variant='b',
+ lr_mult_list=[1.0, 1.0, 1.0, 1.0],
+ groups=1,
+ base_width=64,
+ norm_type='bn',
+ norm_decay=0,
+ freeze_norm=True,
+ freeze_at=0,
+ return_idx=[0, 1, 2, 3],
+ dcn_v2_stages=[-1],
+ num_stages=4,
+ std_senet=False):
+ """
+ Residual Network, see https://arxiv.org/abs/1512.03385
+
+ Args:
+ depth (int): ResNet depth, should be 18, 34, 50, 101, 152.
+ ch_in (int): output channel of first stage, default 64
+ variant (str): ResNet variant, supports 'a', 'b', 'c', 'd' currently
+            lr_mult_list (list): learning rate ratios of the four residual stages
+                                 (res2-res5); lower ratios are typically needed when
+                                 the pretrained model was obtained via distillation
+                                 (default [1.0, 1.0, 1.0, 1.0]).
+ groups (int): group convolution cardinality
+ base_width (int): base width of each group convolution
+ norm_type (str): normalization type, 'bn', 'sync_bn' or 'affine_channel'
+ norm_decay (float): weight decay for normalization layer weights
+ freeze_norm (bool): freeze normalization layers
+ freeze_at (int): freeze the backbone at which stage
+ return_idx (list): index of the stages whose feature maps are returned
+            dcn_v2_stages (list): indices of the stages that use deformable conv v2
+ num_stages (int): total num of stages
+            std_senet (bool): whether to insert an SE block in each residual block, default False
+ """
+ super(ResNet, self).__init__()
+ self._model_type = 'ResNet' if groups == 1 else 'ResNeXt'
+ assert num_stages >= 1 and num_stages <= 4
+ self.depth = depth
+ self.variant = variant
+ self.groups = groups
+ self.base_width = base_width
+ self.norm_type = norm_type
+ self.norm_decay = norm_decay
+ self.freeze_norm = freeze_norm
+ self.freeze_at = freeze_at
+ if isinstance(return_idx, Integral):
+ return_idx = [return_idx]
+ assert max(return_idx) < num_stages, \
+            'the maximum return index must be smaller than num_stages, ' \
+ 'but received maximum return index is {} and num_stages ' \
+ 'is {}'.format(max(return_idx), num_stages)
+ self.return_idx = return_idx
+ self.num_stages = num_stages
+ assert len(lr_mult_list) == 4, \
+ "lr_mult_list length must be 4 but got {}".format(len(lr_mult_list))
+ if isinstance(dcn_v2_stages, Integral):
+ dcn_v2_stages = [dcn_v2_stages]
+ assert max(dcn_v2_stages) < num_stages
+ self.dcn_v2_stages = dcn_v2_stages
+
+ block_nums = ResNet_cfg[depth]
+ na = NameAdapter(self)
+
+ conv1_name = na.fix_c1_stage_name()
+ if variant in ['c', 'd']:
+ conv_def = [
+ [3, ch_in // 2, 3, 2, "conv1_1"],
+ [ch_in // 2, ch_in // 2, 3, 1, "conv1_2"],
+ [ch_in // 2, ch_in, 3, 1, "conv1_3"],
+ ]
+ else:
+ conv_def = [[3, ch_in, 7, 2, conv1_name]]
+ self.conv1 = nn.Sequential()
+ for (c_in, c_out, k, s, _name) in conv_def:
+ self.conv1.add_sublayer(
+ _name,
+ ConvNormLayer(
+ ch_in=c_in,
+ ch_out=c_out,
+ filter_size=k,
+ stride=s,
+ groups=1,
+ act='relu',
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ lr=1.0))
+
+ self.ch_in = ch_in
+ ch_out_list = [64, 128, 256, 512]
+ block = BottleNeck if depth >= 50 else BasicBlock
+
+ self._out_channels = [block.expansion * v for v in ch_out_list]
+ self._out_strides = [4, 8, 16, 32]
+
+ self.res_layers = []
+ for i in range(num_stages):
+ lr_mult = lr_mult_list[i]
+ stage_num = i + 2
+ res_name = "res{}".format(stage_num)
+ res_layer = self.add_sublayer(
+ res_name,
+ Blocks(
+ block,
+ self.ch_in,
+ ch_out_list[i],
+ count=block_nums[i],
+ name_adapter=na,
+ stage_num=stage_num,
+ variant=variant,
+ groups=groups,
+ base_width=base_width,
+ lr=lr_mult,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ dcn_v2=(i in self.dcn_v2_stages),
+ std_senet=std_senet))
+ self.res_layers.append(res_layer)
+ self.ch_in = self._out_channels[i]
+
+ if freeze_at >= 0:
+ self._freeze_parameters(self.conv1)
+ for i in range(min(freeze_at + 1, num_stages)):
+ self._freeze_parameters(self.res_layers[i])
+
+ def _freeze_parameters(self, m):
+ for p in m.parameters():
+ p.stop_gradient = True
+
+ @property
+ def out_shape(self):
+ return [
+ ShapeSpec(
+ channels=self._out_channels[i], stride=self._out_strides[i])
+ for i in self.return_idx
+ ]
+
+ def forward(self, inputs):
+ x = inputs['image']
+ conv1 = self.conv1(x)
+ x = F.max_pool2d(conv1, kernel_size=3, stride=2, padding=1)
+ outs = []
+ for idx, stage in enumerate(self.res_layers):
+ x = stage(x)
+ if idx in self.return_idx:
+ outs.append(x)
+ return outs
+
+
+@register
+class Res5Head(nn.Layer):
+ def __init__(self, depth=50):
+ super(Res5Head, self).__init__()
+ feat_in, feat_out = [1024, 512]
+ if depth < 50:
+ feat_in = 256
+ na = NameAdapter(self)
+ block = BottleNeck if depth >= 50 else BasicBlock
+ self.res5 = Blocks(
+ block, feat_in, feat_out, count=3, name_adapter=na, stage_num=5)
+ self.feat_out = feat_out if depth < 50 else feat_out * 4
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(
+ channels=self.feat_out,
+ stride=16, )]
+
+ def forward(self, roi_feat, stage=0):
+ y = self.res5(roi_feat)
+ return y
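A hedged usage sketch for the backbone as a whole, assuming the sample's `code/` directory is on `PYTHONPATH` so that `ppdet` resolves to the package in this patch:

```python
import paddle
from ppdet.modeling.backbones.resnet import ResNet

# ResNet-50 returning all four stages; freeze_at=-1 leaves everything trainable.
backbone = ResNet(depth=50, return_idx=[0, 1, 2, 3], freeze_at=-1)
feats = backbone({'image': paddle.randn([1, 3, 224, 224])})
for spec, f in zip(backbone.out_shape, feats):
    print(spec.channels, spec.stride, f.shape)
# expected channels/strides: 256/4, 512/8, 1024/16, 2048/32
```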
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/senet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/senet.py
new file mode 100644
index 000000000..eb0bad33f
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/senet.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.nn as nn
+
+from ppdet.core.workspace import register, serializable
+from .resnet import ResNet, Blocks, BasicBlock, BottleNeck
+from .name_adapter import NameAdapter
+from ..shape_spec import ShapeSpec
+
+__all__ = ['SENet', 'SERes5Head']
+
+
+@register
+@serializable
+class SENet(ResNet):
+ __shared__ = ['norm_type']
+
+ def __init__(self,
+ depth=50,
+ variant='b',
+ lr_mult_list=[1.0, 1.0, 1.0, 1.0],
+ groups=1,
+ base_width=64,
+ norm_type='bn',
+ norm_decay=0,
+ freeze_norm=True,
+ freeze_at=0,
+ return_idx=[0, 1, 2, 3],
+ dcn_v2_stages=[-1],
+ std_senet=True,
+ num_stages=4):
+ """
+ Squeeze-and-Excitation Networks, see https://arxiv.org/abs/1709.01507
+
+ Args:
+ depth (int): SENet depth, should be 50, 101, 152
+ variant (str): ResNet variant, supports 'a', 'b', 'c', 'd' currently
+            lr_mult_list (list): learning rate ratios of the four residual stages
+                                 (res2-res5); lower ratios are typically needed when
+                                 the pretrained model was obtained via distillation
+                                 (default [1.0, 1.0, 1.0, 1.0]).
+ groups (int): group convolution cardinality
+ base_width (int): base width of each group convolution
+ norm_type (str): normalization type, 'bn', 'sync_bn' or 'affine_channel'
+ norm_decay (float): weight decay for normalization layer weights
+ freeze_norm (bool): freeze normalization layers
+ freeze_at (int): freeze the backbone at which stage
+ return_idx (list): index of the stages whose feature maps are returned
+            dcn_v2_stages (list): indices of the stages that use deformable conv v2
+            std_senet (bool): whether to insert an SE block in each residual block, default True
+ num_stages (int): total num of stages
+ """
+
+ super(SENet, self).__init__(
+ depth=depth,
+ variant=variant,
+ lr_mult_list=lr_mult_list,
+ ch_in=128,
+ groups=groups,
+ base_width=base_width,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ freeze_at=freeze_at,
+ return_idx=return_idx,
+ dcn_v2_stages=dcn_v2_stages,
+ std_senet=std_senet,
+ num_stages=num_stages)
+
+
+@register
+class SERes5Head(nn.Layer):
+ def __init__(self,
+ depth=50,
+ variant='b',
+ lr_mult=1.0,
+ groups=1,
+ base_width=64,
+ norm_type='bn',
+ norm_decay=0,
+ dcn_v2=False,
+ freeze_norm=False,
+ std_senet=True):
+ """
+ SERes5Head layer
+
+ Args:
+ depth (int): SENet depth, should be 50, 101, 152
+ variant (str): ResNet variant, supports 'a', 'b', 'c', 'd' currently
+            lr_mult (float): learning rate ratio of SERes5Head, default 1.0
+ groups (int): group convolution cardinality
+ base_width (int): base width of each group convolution
+ norm_type (str): normalization type, 'bn', 'sync_bn' or 'affine_channel'
+ norm_decay (float): weight decay for normalization layer weights
+            dcn_v2 (bool): whether to use deformable conv v2, default False
+ std_senet (bool): whether use senet, default True
+
+ """
+ super(SERes5Head, self).__init__()
+ ch_out = 512
+ ch_in = 256 if depth < 50 else 1024
+ na = NameAdapter(self)
+ block = BottleNeck if depth >= 50 else BasicBlock
+ self.res5 = Blocks(
+ block,
+ ch_in,
+ ch_out,
+ count=3,
+ name_adapter=na,
+ stage_num=5,
+ variant=variant,
+ groups=groups,
+ base_width=base_width,
+ lr=lr_mult,
+ norm_type=norm_type,
+ norm_decay=norm_decay,
+ freeze_norm=freeze_norm,
+ dcn_v2=dcn_v2,
+ std_senet=std_senet)
+ self.ch_out = ch_out * block.expansion
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(
+ channels=self.ch_out,
+ stride=16, )]
+
+ def forward(self, roi_feat):
+ y = self.res5(roi_feat)
+ return y
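A short shape check for the head, assuming this sample's `ppdet` package is importable:

```python
import paddle
from ppdet.modeling.backbones.senet import SERes5Head

# 1024-channel RoI features go through res5 (first block has stride 2),
# giving 512 * BottleNeck.expansion = 2048 output channels.
head = SERes5Head(depth=50)
y = head(paddle.randn([8, 1024, 14, 14]))
print(tuple(y.shape), head.out_shape[0].channels)   # (8, 2048, 7, 7) 2048
```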
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/shufflenet_v2.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/shufflenet_v2.py
new file mode 100644
index 000000000..59b0502a1
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/shufflenet_v2.py
@@ -0,0 +1,246 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+from paddle import ParamAttr
+from paddle.nn import Conv2D, MaxPool2D, AdaptiveAvgPool2D, BatchNorm
+from paddle.nn.initializer import KaimingNormal
+from paddle.regularizer import L2Decay
+
+from ppdet.core.workspace import register, serializable
+from numbers import Integral
+from ..shape_spec import ShapeSpec
+from ppdet.modeling.ops import channel_shuffle
+
+__all__ = ['ShuffleNetV2']
+
+
+class ConvBNLayer(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ padding,
+ groups=1,
+ act=None):
+ super(ConvBNLayer, self).__init__()
+ self._conv = Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ groups=groups,
+ weight_attr=ParamAttr(initializer=KaimingNormal()),
+ bias_attr=False)
+
+ self._batch_norm = BatchNorm(
+ out_channels,
+ param_attr=ParamAttr(regularizer=L2Decay(0.0)),
+ bias_attr=ParamAttr(regularizer=L2Decay(0.0)),
+ act=act)
+
+ def forward(self, inputs):
+ y = self._conv(inputs)
+ y = self._batch_norm(y)
+ return y
+
+
+class InvertedResidual(nn.Layer):
+ def __init__(self, in_channels, out_channels, stride, act="relu"):
+ super(InvertedResidual, self).__init__()
+ self._conv_pw = ConvBNLayer(
+ in_channels=in_channels // 2,
+ out_channels=out_channels // 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act=act)
+ self._conv_dw = ConvBNLayer(
+ in_channels=out_channels // 2,
+ out_channels=out_channels // 2,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ groups=out_channels // 2,
+ act=None)
+ self._conv_linear = ConvBNLayer(
+ in_channels=out_channels // 2,
+ out_channels=out_channels // 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act=act)
+
+ def forward(self, inputs):
+ x1, x2 = paddle.split(
+ inputs,
+ num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2],
+ axis=1)
+ x2 = self._conv_pw(x2)
+ x2 = self._conv_dw(x2)
+ x2 = self._conv_linear(x2)
+ out = paddle.concat([x1, x2], axis=1)
+ return channel_shuffle(out, 2)
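What `channel_shuffle(out, 2)` does can be written out with plain reshape/transpose; the real op lives in `ppdet.modeling.ops`, so this is just an equivalent illustration:

```python
import paddle

x = paddle.arange(8, dtype='float32').reshape([1, 8, 1, 1])   # channels 0..7
n, c, h, w = 1, 8, 1, 1
g = 2   # two groups, one per branch
y = x.reshape([n, g, c // g, h, w]).transpose([0, 2, 1, 3, 4]).reshape([n, c, h, w])
print(y.flatten().numpy())   # [0. 4. 1. 5. 2. 6. 3. 7.] -- the two branches interleave
```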
+
+
+class InvertedResidualDS(nn.Layer):
+ def __init__(self, in_channels, out_channels, stride, act="relu"):
+ super(InvertedResidualDS, self).__init__()
+
+ # branch1
+ self._conv_dw_1 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=in_channels,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ groups=in_channels,
+ act=None)
+ self._conv_linear_1 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels // 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act=act)
+ # branch2
+ self._conv_pw_2 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels // 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act=act)
+ self._conv_dw_2 = ConvBNLayer(
+ in_channels=out_channels // 2,
+ out_channels=out_channels // 2,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ groups=out_channels // 2,
+ act=None)
+ self._conv_linear_2 = ConvBNLayer(
+ in_channels=out_channels // 2,
+ out_channels=out_channels // 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+ act=act)
+
+ def forward(self, inputs):
+ x1 = self._conv_dw_1(inputs)
+ x1 = self._conv_linear_1(x1)
+ x2 = self._conv_pw_2(inputs)
+ x2 = self._conv_dw_2(x2)
+ x2 = self._conv_linear_2(x2)
+ out = paddle.concat([x1, x2], axis=1)
+
+ return channel_shuffle(out, 2)
+
+
+@register
+@serializable
+class ShuffleNetV2(nn.Layer):
+ def __init__(self, scale=1.0, act="relu", feature_maps=[5, 13, 17]):
+ super(ShuffleNetV2, self).__init__()
+ self.scale = scale
+ if isinstance(feature_maps, Integral):
+ feature_maps = [feature_maps]
+ self.feature_maps = feature_maps
+ stage_repeats = [4, 8, 4]
+
+ if scale == 0.25:
+ stage_out_channels = [-1, 24, 24, 48, 96, 512]
+ elif scale == 0.33:
+ stage_out_channels = [-1, 24, 32, 64, 128, 512]
+ elif scale == 0.5:
+ stage_out_channels = [-1, 24, 48, 96, 192, 1024]
+ elif scale == 1.0:
+ stage_out_channels = [-1, 24, 116, 232, 464, 1024]
+ elif scale == 1.5:
+ stage_out_channels = [-1, 24, 176, 352, 704, 1024]
+ elif scale == 2.0:
+ stage_out_channels = [-1, 24, 224, 488, 976, 2048]
+ else:
+            raise NotImplementedError(
+                "scale {} is not supported; expected one of "
+                "0.25/0.33/0.5/1.0/1.5/2.0".format(scale))
+
+ self._out_channels = []
+ self._feature_idx = 0
+ # 1. conv1
+ self._conv1 = ConvBNLayer(
+ in_channels=3,
+ out_channels=stage_out_channels[1],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ act=act)
+ self._max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1)
+ self._feature_idx += 1
+
+ # 2. bottleneck sequences
+ self._block_list = []
+ for stage_id, num_repeat in enumerate(stage_repeats):
+ for i in range(num_repeat):
+ if i == 0:
+ block = self.add_sublayer(
+ name=str(stage_id + 2) + '_' + str(i + 1),
+ sublayer=InvertedResidualDS(
+ in_channels=stage_out_channels[stage_id + 1],
+ out_channels=stage_out_channels[stage_id + 2],
+ stride=2,
+ act=act))
+ else:
+ block = self.add_sublayer(
+ name=str(stage_id + 2) + '_' + str(i + 1),
+ sublayer=InvertedResidual(
+ in_channels=stage_out_channels[stage_id + 2],
+ out_channels=stage_out_channels[stage_id + 2],
+ stride=1,
+ act=act))
+ self._block_list.append(block)
+ self._feature_idx += 1
+ self._update_out_channels(stage_out_channels[stage_id + 2],
+ self._feature_idx, self.feature_maps)
+
+ def _update_out_channels(self, channel, feature_idx, feature_maps):
+ if feature_idx in feature_maps:
+ self._out_channels.append(channel)
+
+ def forward(self, inputs):
+ y = self._conv1(inputs['image'])
+ y = self._max_pool(y)
+ outs = []
+ for i, inv in enumerate(self._block_list):
+ y = inv(y)
+ if i + 2 in self.feature_maps:
+ outs.append(y)
+
+ return outs
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
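A hedged usage sketch (again assuming this sample's `ppdet` is importable): with the defaults `scale=1.0` and `feature_maps=[5, 13, 17]`, the three returned maps carry 116, 232 and 464 channels at strides 8, 16 and 32.

```python
import paddle
from ppdet.modeling.backbones.shufflenet_v2 import ShuffleNetV2

net = ShuffleNetV2(scale=1.0)
outs = net({'image': paddle.randn([1, 3, 224, 224])})
print([tuple(o.shape) for o in outs])
# [(1, 116, 28, 28), (1, 232, 14, 14), (1, 464, 7, 7)]
```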
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/swin_transformer.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/swin_transformer.py
new file mode 100644
index 000000000..027e4f67a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/swin_transformer.py
@@ -0,0 +1,740 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py
+The copyright of microsoft/Swin-Transformer is as follows:
+MIT License [see LICENSE for details]
+"""
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import TruncatedNormal, Constant, Assign
+from ppdet.modeling.shape_spec import ShapeSpec
+from ppdet.core.workspace import register, serializable
+import numpy as np
+
+# Common initializations
+ones_ = Constant(value=1.)
+zeros_ = Constant(value=0.)
+trunc_normal_ = TruncatedNormal(std=.02)
+
+
+# Common Functions
+def to_2tuple(x):
+ return tuple([x] * 2)
+
+
+def add_parameter(layer, datas, name=None):
+ parameter = layer.create_parameter(
+ shape=(datas.shape), default_initializer=Assign(datas))
+ if name:
+ layer.add_parameter(name, parameter)
+ return parameter
+
+
+# Common Layers
+def drop_path(x, drop_prob=0., training=False):
+ """
+    Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks.
+    The original name is misleading: 'Drop Connect' is a different form of dropout from a separate paper.
+    See the discussion at https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956
+ """
+ if drop_prob == 0. or not training:
+ return x
+ keep_prob = paddle.to_tensor(1 - drop_prob)
+ shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
+ random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype)
+ random_tensor = paddle.floor(random_tensor) # binarize
+ output = x.divide(keep_prob) * random_tensor
+ return output
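A quick numeric check of the rule above: each sample is kept with probability 1 - drop_prob and scaled by 1/keep_prob, so the output matches the input in expectation.

```python
import paddle

paddle.seed(0)
x = paddle.ones([4, 3])
keep_prob = paddle.to_tensor(0.8)
mask = paddle.floor(keep_prob + paddle.rand([4, 1]))   # 0 or 1 per sample
print((x.divide(keep_prob) * mask)[:, 0].numpy())      # rows are 0.0 or 1.25
```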
+
+
+class DropPath(nn.Layer):
+ def __init__(self, drop_prob=None):
+ super(DropPath, self).__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, x):
+ return drop_path(x, self.drop_prob, self.training)
+
+
+class Identity(nn.Layer):
+ def __init__(self):
+ super(Identity, self).__init__()
+
+ def forward(self, input):
+ return input
+
+
+class Mlp(nn.Layer):
+ def __init__(self,
+ in_features,
+ hidden_features=None,
+ out_features=None,
+ act_layer=nn.GELU,
+ drop=0.):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features)
+ self.act = act_layer()
+ self.fc2 = nn.Linear(hidden_features, out_features)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x):
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.drop(x)
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
+
+
+def window_partition(x, window_size):
+ """
+ Args:
+ x: (B, H, W, C)
+ window_size (int): window size
+ Returns:
+ windows: (num_windows*B, window_size, window_size, C)
+ """
+ B, H, W, C = x.shape
+ x = x.reshape(
+ [B, H // window_size, window_size, W // window_size, window_size, C])
+ windows = x.transpose([0, 1, 3, 2, 4, 5]).reshape(
+ [-1, window_size, window_size, C])
+ return windows
+
+
+def window_reverse(windows, window_size, H, W):
+ """
+ Args:
+ windows: (num_windows*B, window_size, window_size, C)
+ window_size (int): Window size
+ H (int): Height of image
+ W (int): Width of image
+ Returns:
+ x: (B, H, W, C)
+ """
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
+ x = windows.reshape(
+ [B, H // window_size, W // window_size, window_size, window_size, -1])
+ x = x.transpose([0, 1, 3, 2, 4, 5]).reshape([B, H, W, -1])
+ return x
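The two helpers are exact inverses; running this right after them (toy sizes: 8x8 maps, window 4) confirms the round trip:

```python
import paddle

x = paddle.randn([2, 8, 8, 3])            # B, H, W, C
win = window_partition(x, 4)              # 4 windows per image
y = window_reverse(win, 4, 8, 8)
print(win.shape, bool((x == y).all()))    # [8, 4, 4, 3] True
```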
+
+
+class WindowAttention(nn.Layer):
+ """ Window based multi-head self attention (W-MSA) module with relative position bias.
+ It supports both of shifted and non-shifted window.
+
+ Args:
+ dim (int): Number of input channels.
+ window_size (tuple[int]): The height and width of the window.
+ num_heads (int): Number of attention heads.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
+ """
+
+ def __init__(self,
+ dim,
+ window_size,
+ num_heads,
+ qkv_bias=True,
+ qk_scale=None,
+ attn_drop=0.,
+ proj_drop=0.):
+
+ super().__init__()
+ self.dim = dim
+ self.window_size = window_size # Wh, Ww
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ self.scale = qk_scale or head_dim**-0.5
+
+ # define a parameter table of relative position bias
+ self.relative_position_bias_table = add_parameter(
+ self,
+ paddle.zeros(((2 * window_size[0] - 1) * (2 * window_size[1] - 1),
+ num_heads))) # 2*Wh-1 * 2*Ww-1, nH
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = paddle.arange(self.window_size[0])
+ coords_w = paddle.arange(self.window_size[1])
+ coords = paddle.stack(paddle.meshgrid(
+ [coords_h, coords_w])) # 2, Wh, Ww
+ coords_flatten = paddle.flatten(coords, 1) # 2, Wh*Ww
+ coords_flatten_1 = coords_flatten.unsqueeze(axis=2)
+ coords_flatten_2 = coords_flatten.unsqueeze(axis=1)
+ relative_coords = coords_flatten_1 - coords_flatten_2
+ relative_coords = relative_coords.transpose(
+ [1, 2, 0]) # Wh*Ww, Wh*Ww, 2
+ relative_coords[:, :, 0] += self.window_size[
+ 0] - 1 # shift to start from 0
+ relative_coords[:, :, 1] += self.window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+ self.relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
+ self.register_buffer("relative_position_index",
+ self.relative_position_index)
+
+ self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias)
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ trunc_normal_(self.relative_position_bias_table)
+ self.softmax = nn.Softmax(axis=-1)
+
+ def forward(self, x, mask=None):
+ """ Forward function.
+ Args:
+ x: input features with shape of (num_windows*B, N, C)
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
+ """
+ B_, N, C = x.shape
+ qkv = self.qkv(x).reshape(
+ [B_, N, 3, self.num_heads, C // self.num_heads]).transpose(
+ [2, 0, 3, 1, 4])
+ q, k, v = qkv[0], qkv[1], qkv[2]
+
+ q = q * self.scale
+ attn = paddle.mm(q, k.transpose([0, 1, 3, 2]))
+
+ index = self.relative_position_index.reshape([-1])
+
+ relative_position_bias = paddle.index_select(
+ self.relative_position_bias_table, index)
+ relative_position_bias = relative_position_bias.reshape([
+ self.window_size[0] * self.window_size[1],
+ self.window_size[0] * self.window_size[1], -1
+ ]) # Wh*Ww,Wh*Ww,nH
+ relative_position_bias = relative_position_bias.transpose(
+ [2, 0, 1]) # nH, Wh*Ww, Wh*Ww
+ attn = attn + relative_position_bias.unsqueeze(0)
+
+ if mask is not None:
+ nW = mask.shape[0]
+ attn = attn.reshape([B_ // nW, nW, self.num_heads, N, N
+ ]) + mask.unsqueeze(1).unsqueeze(0)
+ attn = attn.reshape([-1, self.num_heads, N, N])
+ attn = self.softmax(attn)
+ else:
+ attn = self.softmax(attn)
+
+ attn = self.attn_drop(attn)
+
+ # x = (attn @ v).transpose(1, 2).reshape([B_, N, C])
+ x = paddle.mm(attn, v).transpose([0, 2, 1, 3]).reshape([B_, N, C])
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
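The relative-position bookkeeping in `__init__` is easiest to see on a tiny 2x2 window, where every token pair maps into a (2*2-1)*(2*2-1) = 9-entry bias table:

```python
import paddle

Wh = Ww = 2
coords = paddle.stack(paddle.meshgrid([paddle.arange(Wh), paddle.arange(Ww)]))
flat = paddle.flatten(coords, 1)                 # [2, Wh*Ww]
rel = flat.unsqueeze(2) - flat.unsqueeze(1)      # pairwise coordinate deltas
rel = rel.transpose([1, 2, 0])
rel[:, :, 0] += Wh - 1                           # shift deltas to start from 0
rel[:, :, 1] += Ww - 1
rel[:, :, 0] *= 2 * Ww - 1
print(rel.sum(-1).numpy())                       # 4x4 table of indices in 0..8
```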
+
+
+class SwinTransformerBlock(nn.Layer):
+ """ Swin Transformer Block.
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads.
+ window_size (int): Window size.
+ shift_size (int): Shift size for SW-MSA.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+ drop (float, optional): Dropout rate. Default: 0.0
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
+ act_layer (nn.Layer, optional): Activation layer. Default: nn.GELU
+ norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm
+ """
+
+ def __init__(self,
+ dim,
+ num_heads,
+ window_size=7,
+ shift_size=0,
+ mlp_ratio=4.,
+ qkv_bias=True,
+ qk_scale=None,
+ drop=0.,
+ attn_drop=0.,
+ drop_path=0.,
+ act_layer=nn.GELU,
+ norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.dim = dim
+ self.num_heads = num_heads
+ self.window_size = window_size
+ self.shift_size = shift_size
+ self.mlp_ratio = mlp_ratio
+        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
+
+ self.norm1 = norm_layer(dim)
+ self.attn = WindowAttention(
+ dim,
+ window_size=to_2tuple(self.window_size),
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ attn_drop=attn_drop,
+ proj_drop=drop)
+
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity()
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = Mlp(in_features=dim,
+ hidden_features=mlp_hidden_dim,
+ act_layer=act_layer,
+ drop=drop)
+
+ self.H = None
+ self.W = None
+
+ def forward(self, x, mask_matrix):
+ """ Forward function.
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ mask_matrix: Attention mask for cyclic shift.
+ """
+ B, L, C = x.shape
+ H, W = self.H, self.W
+ assert L == H * W, "input feature has wrong size"
+
+ shortcut = x
+ x = self.norm1(x)
+ x = x.reshape([B, H, W, C])
+
+ # pad feature maps to multiples of window size
+ pad_l = pad_t = 0
+ pad_r = (self.window_size - W % self.window_size) % self.window_size
+ pad_b = (self.window_size - H % self.window_size) % self.window_size
+ x = F.pad(x, [0, pad_l, 0, pad_b, 0, pad_r, 0, pad_t])
+ _, Hp, Wp, _ = x.shape
+
+ # cyclic shift
+ if self.shift_size > 0:
+ shifted_x = paddle.roll(
+ x, shifts=(-self.shift_size, -self.shift_size), axis=(1, 2))
+ attn_mask = mask_matrix
+ else:
+ shifted_x = x
+ attn_mask = None
+
+ # partition windows
+ x_windows = window_partition(
+ shifted_x, self.window_size) # nW*B, window_size, window_size, C
+ x_windows = x_windows.reshape(
+ [-1, self.window_size * self.window_size,
+ C]) # nW*B, window_size*window_size, C
+
+ # W-MSA/SW-MSA
+ attn_windows = self.attn(
+ x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
+
+ # merge windows
+ attn_windows = attn_windows.reshape(
+ [-1, self.window_size, self.window_size, C])
+ shifted_x = window_reverse(attn_windows, self.window_size, Hp,
+ Wp) # B H' W' C
+
+ # reverse cyclic shift
+ if self.shift_size > 0:
+ x = paddle.roll(
+ shifted_x,
+ shifts=(self.shift_size, self.shift_size),
+ axis=(1, 2))
+ else:
+ x = shifted_x
+
+ if pad_r > 0 or pad_b > 0:
+ x = x[:, :H, :W, :]
+
+ x = x.reshape([B, H * W, C])
+
+ # FFN
+ x = shortcut + self.drop_path(x)
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+
+ return x
+
+
+class PatchMerging(nn.Layer):
+ r""" Patch Merging Layer.
+ Args:
+ dim (int): Number of input channels.
+ norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm
+ """
+
+ def __init__(self, dim, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.dim = dim
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias_attr=False)
+ self.norm = norm_layer(4 * dim)
+
+ def forward(self, x, H, W):
+ """ Forward function.
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ """
+ B, L, C = x.shape
+ assert L == H * W, "input feature has wrong size"
+
+ x = x.reshape([B, H, W, C])
+
+ # padding
+ pad_input = (H % 2 == 1) or (W % 2 == 1)
+ if pad_input:
+ x = F.pad(x, [0, 0, 0, W % 2, 0, H % 2])
+
+ x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
+ x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
+ x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
+ x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
+ x = paddle.concat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
+ x = x.reshape([B, H * W // 4, 4 * C]) # B H/2*W/2 4*C
+
+ x = self.norm(x)
+ x = self.reduction(x)
+
+ return x
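Shape check for the merging step, assuming the class above is in scope: H x W tokens become (H/2) x (W/2) tokens, with the 4C concatenated channels reduced back to 2C.

```python
import paddle

pm = PatchMerging(dim=6)
x = paddle.randn([1, 4 * 4, 6])   # 4x4 token grid, C=6
print(pm(x, 4, 4).shape)          # [1, 4, 12]
```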
+
+
+class BasicLayer(nn.Layer):
+ """ A basic Swin Transformer layer for one stage.
+ Args:
+ dim (int): Number of input channels.
+ input_resolution (tuple[int]): Input resolution.
+ depth (int): Number of blocks.
+ num_heads (int): Number of attention heads.
+ window_size (int): Local window size.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+ drop (float, optional): Dropout rate. Default: 0.0
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+ norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm
+ downsample (nn.Layer | None, optional): Downsample layer at the end of the layer. Default: None
+ """
+
+ def __init__(self,
+ dim,
+ depth,
+ num_heads,
+ window_size=7,
+ mlp_ratio=4.,
+ qkv_bias=True,
+ qk_scale=None,
+ drop=0.,
+ attn_drop=0.,
+ drop_path=0.,
+ norm_layer=nn.LayerNorm,
+ downsample=None):
+ super().__init__()
+ self.window_size = window_size
+ self.shift_size = window_size // 2
+ self.depth = depth
+
+ # build blocks
+ self.blocks = nn.LayerList([
+ SwinTransformerBlock(
+ dim=dim,
+ num_heads=num_heads,
+ window_size=window_size,
+ shift_size=0 if (i % 2 == 0) else window_size // 2,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ drop=drop,
+ attn_drop=attn_drop,
+ drop_path=drop_path[i]
+ if isinstance(drop_path, np.ndarray) else drop_path,
+ norm_layer=norm_layer) for i in range(depth)
+ ])
+
+ # patch merging layer
+ if downsample is not None:
+ self.downsample = downsample(dim=dim, norm_layer=norm_layer)
+ else:
+ self.downsample = None
+
+ def forward(self, x, H, W):
+ """ Forward function.
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ """
+
+ # calculate attention mask for SW-MSA
+ Hp = int(np.ceil(H / self.window_size)) * self.window_size
+ Wp = int(np.ceil(W / self.window_size)) * self.window_size
+        img_mask = paddle.zeros([1, Hp, Wp, 1], dtype='float32')  # 1 Hp Wp 1
+ h_slices = (slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None))
+ w_slices = (slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None))
+ cnt = 0
+ for h in h_slices:
+ for w in w_slices:
+ img_mask[:, h, w, :] = cnt
+ cnt += 1
+ mask_windows = window_partition(
+ img_mask, self.window_size) # nW, window_size, window_size, 1
+ mask_windows = mask_windows.reshape(
+ [-1, self.window_size * self.window_size])
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+ huns = -100.0 * paddle.ones_like(attn_mask)
+ attn_mask = huns * (attn_mask != 0).astype("float32")
+
+ for blk in self.blocks:
+ blk.H, blk.W = H, W
+ x = blk(x, attn_mask)
+ if self.downsample is not None:
+ x_down = self.downsample(x, H, W)
+ Wh, Ww = (H + 1) // 2, (W + 1) // 2
+ return x, H, W, x_down, Wh, Ww
+ else:
+ return x, H, W, x, H, W
+
+
+class PatchEmbed(nn.Layer):
+ """ Image to Patch Embedding
+ Args:
+ patch_size (int): Patch token size. Default: 4.
+ in_chans (int): Number of input image channels. Default: 3.
+ embed_dim (int): Number of linear projection output channels. Default: 96.
+ norm_layer (nn.Layer, optional): Normalization layer. Default: None
+ """
+
+ def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+ super().__init__()
+ patch_size = to_2tuple(patch_size)
+ self.patch_size = patch_size
+
+ self.in_chans = in_chans
+ self.embed_dim = embed_dim
+
+ self.proj = nn.Conv2D(
+ in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+ if norm_layer is not None:
+ self.norm = norm_layer(embed_dim)
+ else:
+ self.norm = None
+
+ def forward(self, x):
+ B, C, H, W = x.shape
+ # assert [H, W] == self.img_size[:2], "Input image size ({H}*{W}) doesn't match model ({}*{}).".format(H, W, self.img_size[0], self.img_size[1])
+ if W % self.patch_size[1] != 0:
+ x = F.pad(x, [0, self.patch_size[1] - W % self.patch_size[1], 0, 0])
+ if H % self.patch_size[0] != 0:
+ x = F.pad(x, [0, 0, 0, self.patch_size[0] - H % self.patch_size[0]])
+
+ x = self.proj(x)
+ if self.norm is not None:
+ _, _, Wh, Ww = x.shape
+ x = x.flatten(2).transpose([0, 2, 1])
+ x = self.norm(x)
+ x = x.transpose([0, 2, 1]).reshape([-1, self.embed_dim, Wh, Ww])
+
+ return x
+
+
+@register
+@serializable
+class SwinTransformer(nn.Layer):
+ """ Swin Transformer
+    A PaddlePaddle impl of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
+    https://arxiv.org/pdf/2103.14030
+
+    Args:
+        pretrain_img_size (int | tuple(int)): Input image size used for pretraining. Default: 224
+        patch_size (int | tuple(int)): Patch size. Default: 4
+        in_chans (int): Number of input image channels. Default: 3
+        embed_dim (int): Patch embedding dimension. Default: 96
+        depths (tuple(int)): Depth of each Swin Transformer layer.
+        num_heads (tuple(int)): Number of attention heads in different layers.
+        window_size (int): Window size. Default: 7
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
+        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
+        drop_rate (float): Dropout rate. Default: 0
+        attn_drop_rate (float): Attention dropout rate. Default: 0
+        drop_path_rate (float): Stochastic depth rate. Default: 0.2
+        norm_layer (nn.Layer): Normalization layer. Default: nn.LayerNorm.
+        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
+        patch_norm (bool): If True, add normalization after patch embedding. Default: True
+        out_indices (tuple(int)): Indices of stages whose feature maps are returned. Default: (0, 1, 2, 3)
+        frozen_stages (int): Freeze stages up to this index; -1 freezes nothing. Default: -1
+        pretrained (str): Local path or URL of pretrained weights to load. Default: None
+
+ def __init__(self,
+ pretrain_img_size=224,
+ patch_size=4,
+ in_chans=3,
+ embed_dim=96,
+ depths=[2, 2, 6, 2],
+ num_heads=[3, 6, 12, 24],
+ window_size=7,
+ mlp_ratio=4.,
+ qkv_bias=True,
+ qk_scale=None,
+ drop_rate=0.,
+ attn_drop_rate=0.,
+ drop_path_rate=0.2,
+ norm_layer=nn.LayerNorm,
+ ape=False,
+ patch_norm=True,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=-1,
+ pretrained=None):
+ super(SwinTransformer, self).__init__()
+
+ self.pretrain_img_size = pretrain_img_size
+ self.num_layers = len(depths)
+ self.embed_dim = embed_dim
+ self.ape = ape
+ self.patch_norm = patch_norm
+ self.out_indices = out_indices
+ self.frozen_stages = frozen_stages
+
+ # split image into non-overlapping patches
+ self.patch_embed = PatchEmbed(
+ patch_size=patch_size,
+ in_chans=in_chans,
+ embed_dim=embed_dim,
+ norm_layer=norm_layer if self.patch_norm else None)
+
+ # absolute position embedding
+ if self.ape:
+ pretrain_img_size = to_2tuple(pretrain_img_size)
+ patch_size = to_2tuple(patch_size)
+ patches_resolution = [
+ pretrain_img_size[0] // patch_size[0],
+ pretrain_img_size[1] // patch_size[1]
+ ]
+
+ self.absolute_pos_embed = add_parameter(
+ self,
+ paddle.zeros((1, embed_dim, patches_resolution[0],
+ patches_resolution[1])))
+ trunc_normal_(self.absolute_pos_embed)
+
+ self.pos_drop = nn.Dropout(p=drop_rate)
+
+ # stochastic depth
+ dpr = np.linspace(0, drop_path_rate,
+ sum(depths)) # stochastic depth decay rule
+
+ # build layers
+ self.layers = nn.LayerList()
+ for i_layer in range(self.num_layers):
+ layer = BasicLayer(
+ dim=int(embed_dim * 2**i_layer),
+ depth=depths[i_layer],
+ num_heads=num_heads[i_layer],
+ window_size=window_size,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ drop=drop_rate,
+ attn_drop=attn_drop_rate,
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
+ norm_layer=norm_layer,
+ downsample=PatchMerging
+ if (i_layer < self.num_layers - 1) else None)
+ self.layers.append(layer)
+
+ num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)]
+ self.num_features = num_features
+
+ # add a norm layer for each output
+ for i_layer in out_indices:
+ layer = norm_layer(num_features[i_layer])
+ layer_name = f'norm{i_layer}'
+ self.add_sublayer(layer_name, layer)
+
+ self.apply(self._init_weights)
+ self._freeze_stages()
+ if pretrained:
+ if 'http' in pretrained: #URL
+ path = paddle.utils.download.get_weights_path_from_url(
+ pretrained)
+ else: #model in local path
+ path = pretrained
+ self.set_state_dict(paddle.load(path))
+
+ def _freeze_stages(self):
+ if self.frozen_stages >= 0:
+ self.patch_embed.eval()
+ for param in self.patch_embed.parameters():
+                param.stop_gradient = True  # Paddle's equivalent of requires_grad=False
+
+ if self.frozen_stages >= 1 and self.ape:
+            self.absolute_pos_embed.stop_gradient = True
+
+ if self.frozen_stages >= 2:
+ self.pos_drop.eval()
+ for i in range(0, self.frozen_stages - 1):
+ m = self.layers[i]
+ m.eval()
+ for param in m.parameters():
+                    param.stop_gradient = True
+
+ def _init_weights(self, m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ zeros_(m.bias)
+ elif isinstance(m, nn.LayerNorm):
+ zeros_(m.bias)
+ ones_(m.weight)
+
+ def forward(self, x):
+ """Forward function."""
+ x = self.patch_embed(x['image'])
+ _, _, Wh, Ww = x.shape
+ if self.ape:
+ # interpolate the position embedding to the corresponding size
+ absolute_pos_embed = F.interpolate(
+ self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')
+ x = (x + absolute_pos_embed).flatten(2).transpose([0, 2, 1])
+ else:
+ x = x.flatten(2).transpose([0, 2, 1])
+ x = self.pos_drop(x)
+ outs = []
+ for i in range(self.num_layers):
+ layer = self.layers[i]
+ x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
+ if i in self.out_indices:
+ norm_layer = getattr(self, f'norm{i}')
+ x_out = norm_layer(x_out)
+ out = x_out.reshape((-1, H, W, self.num_features[i])).transpose(
+ (0, 3, 1, 2))
+ outs.append(out)
+
+ return tuple(outs)
+
+ @property
+ def out_shape(self):
+ out_strides = [4, 8, 16, 32]
+ return [
+ ShapeSpec(
+ channels=self.num_features[i], stride=out_strides[i])
+ for i in self.out_indices
+ ]
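A hedged end-to-end sketch (assumes this sample's `ppdet` is importable): the Swin-Tiny configuration below yields four maps at strides 4/8/16/32 with 96/192/384/768 channels.

```python
import paddle
from ppdet.modeling.backbones.swin_transformer import SwinTransformer

model = SwinTransformer(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
outs = model({'image': paddle.randn([1, 3, 224, 224])})
print([tuple(o.shape) for o in outs])
# [(1, 96, 56, 56), (1, 192, 28, 28), (1, 384, 14, 14), (1, 768, 7, 7)]
```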
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/vgg.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/vgg.py
new file mode 100644
index 000000000..e05753209
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/backbones/vgg.py
@@ -0,0 +1,210 @@
+from __future__ import division
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn import Conv2D, MaxPool2D
+from ppdet.core.workspace import register, serializable
+from ..shape_spec import ShapeSpec
+
+__all__ = ['VGG']
+
+VGG_cfg = {16: [2, 2, 3, 3, 3], 19: [2, 2, 4, 4, 4]}
+
+
+class ConvBlock(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ groups,
+ pool_size=2,
+ pool_stride=2,
+ pool_padding=0,
+ name=None):
+ super(ConvBlock, self).__init__()
+
+ self.groups = groups
+ self.conv0 = nn.Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1)
+ self.conv_out_list = []
+ for i in range(1, groups):
+ conv_out = self.add_sublayer(
+ 'conv{}'.format(i),
+ Conv2D(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1))
+ self.conv_out_list.append(conv_out)
+
+ self.pool = MaxPool2D(
+ kernel_size=pool_size,
+ stride=pool_stride,
+ padding=pool_padding,
+ ceil_mode=True)
+
+ def forward(self, inputs):
+ out = self.conv0(inputs)
+ out = F.relu(out)
+ for conv_i in self.conv_out_list:
+ out = conv_i(out)
+ out = F.relu(out)
+ pool = self.pool(out)
+ return out, pool
+
+
+class ExtraBlock(nn.Layer):
+ def __init__(self,
+ in_channels,
+ mid_channels,
+ out_channels,
+ padding,
+ stride,
+ kernel_size,
+ name=None):
+ super(ExtraBlock, self).__init__()
+
+ self.conv0 = Conv2D(
+ in_channels=in_channels,
+ out_channels=mid_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+ self.conv1 = Conv2D(
+ in_channels=mid_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding)
+
+ def forward(self, inputs):
+ out = self.conv0(inputs)
+ out = F.relu(out)
+ out = self.conv1(out)
+ out = F.relu(out)
+ return out
+
+
+class L2NormScale(nn.Layer):
+ def __init__(self, num_channels, scale=1.0):
+ super(L2NormScale, self).__init__()
+ self.scale = self.create_parameter(
+ attr=ParamAttr(initializer=paddle.nn.initializer.Constant(scale)),
+ shape=[num_channels])
+
+ def forward(self, inputs):
+ out = F.normalize(inputs, axis=1, epsilon=1e-10)
+ # out = self.scale.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(
+ # out) * out
+ out = self.scale.unsqueeze(0).unsqueeze(2).unsqueeze(3) * out
+ return out
+
+
+@register
+@serializable
+class VGG(nn.Layer):
+ def __init__(self,
+ depth=16,
+ normalizations=[20., -1, -1, -1, -1, -1],
+ extra_block_filters=[[256, 512, 1, 2, 3], [128, 256, 1, 2, 3],
+ [128, 256, 0, 1, 3],
+ [128, 256, 0, 1, 3]]):
+ super(VGG, self).__init__()
+
+ assert depth in [16, 19], \
+ "depth as 16/19 supported currently, but got {}".format(depth)
+ self.depth = depth
+ self.groups = VGG_cfg[depth]
+ self.normalizations = normalizations
+ self.extra_block_filters = extra_block_filters
+
+ self._out_channels = []
+
+ self.conv_block_0 = ConvBlock(
+ 3, 64, self.groups[0], 2, 2, 0, name="conv1_")
+ self.conv_block_1 = ConvBlock(
+ 64, 128, self.groups[1], 2, 2, 0, name="conv2_")
+ self.conv_block_2 = ConvBlock(
+ 128, 256, self.groups[2], 2, 2, 0, name="conv3_")
+ self.conv_block_3 = ConvBlock(
+ 256, 512, self.groups[3], 2, 2, 0, name="conv4_")
+ self.conv_block_4 = ConvBlock(
+ 512, 512, self.groups[4], 3, 1, 1, name="conv5_")
+ self._out_channels.append(512)
+
+ self.fc6 = Conv2D(
+ in_channels=512,
+ out_channels=1024,
+ kernel_size=3,
+ stride=1,
+ padding=6,
+ dilation=6)
+ self.fc7 = Conv2D(
+ in_channels=1024,
+ out_channels=1024,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+ self._out_channels.append(1024)
+
+ # extra block
+ self.extra_convs = []
+ last_channels = 1024
+ for i, v in enumerate(self.extra_block_filters):
+            assert len(v) == 5, "each extra_block_filters entry must have 5 values"
+ extra_conv = self.add_sublayer("conv{}".format(6 + i),
+ ExtraBlock(last_channels, v[0], v[1],
+ v[2], v[3], v[4]))
+ last_channels = v[1]
+ self.extra_convs.append(extra_conv)
+ self._out_channels.append(last_channels)
+
+ self.norms = []
+ for i, n in enumerate(self.normalizations):
+ if n != -1:
+ norm = self.add_sublayer("norm{}".format(i),
+ L2NormScale(
+ self.extra_block_filters[i][1], n))
+ else:
+ norm = None
+ self.norms.append(norm)
+
+ def forward(self, inputs):
+ outputs = []
+
+ conv, pool = self.conv_block_0(inputs['image'])
+ conv, pool = self.conv_block_1(pool)
+ conv, pool = self.conv_block_2(pool)
+ conv, pool = self.conv_block_3(pool)
+ outputs.append(conv)
+
+ conv, pool = self.conv_block_4(pool)
+ out = self.fc6(pool)
+ out = F.relu(out)
+ out = self.fc7(out)
+ out = F.relu(out)
+ outputs.append(out)
+
+ if not self.extra_block_filters:
+ return outputs
+
+ # extra block
+ for extra_conv in self.extra_convs:
+ out = extra_conv(out)
+ outputs.append(out)
+
+ for i, n in enumerate(self.normalizations):
+ if n != -1:
+ outputs[i] = self.norms[i](outputs[i])
+
+ return outputs
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
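A hedged usage sketch (assumes this sample's `ppdet` is importable): the SSD-style VGG16 above returns six maps, conv4_3 (512 channels, L2-normalized), fc7 (1024 channels), plus the four extra blocks (512/256/256/256 channels).

```python
import paddle
from ppdet.modeling.backbones.vgg import VGG

vgg = VGG(depth=16)
outs = vgg({'image': paddle.randn([1, 3, 300, 300])})
print([tuple(o.shape) for o in outs])   # six feature maps for SSD300
```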
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/bbox_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/bbox_utils.py
new file mode 100644
index 000000000..e040ba69b
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/bbox_utils.py
@@ -0,0 +1,753 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import paddle
+import numpy as np
+
+
+def bbox2delta(src_boxes, tgt_boxes, weights):
+ src_w = src_boxes[:, 2] - src_boxes[:, 0]
+ src_h = src_boxes[:, 3] - src_boxes[:, 1]
+ src_ctr_x = src_boxes[:, 0] + 0.5 * src_w
+ src_ctr_y = src_boxes[:, 1] + 0.5 * src_h
+
+ tgt_w = tgt_boxes[:, 2] - tgt_boxes[:, 0]
+ tgt_h = tgt_boxes[:, 3] - tgt_boxes[:, 1]
+ tgt_ctr_x = tgt_boxes[:, 0] + 0.5 * tgt_w
+ tgt_ctr_y = tgt_boxes[:, 1] + 0.5 * tgt_h
+
+ wx, wy, ww, wh = weights
+ dx = wx * (tgt_ctr_x - src_ctr_x) / src_w
+ dy = wy * (tgt_ctr_y - src_ctr_y) / src_h
+ dw = ww * paddle.log(tgt_w / src_w)
+ dh = wh * paddle.log(tgt_h / src_h)
+
+ deltas = paddle.stack((dx, dy, dw, dh), axis=1)
+ return deltas
+
+
+def delta2bbox(deltas, boxes, weights):
+ clip_scale = math.log(1000.0 / 16)
+
+ widths = boxes[:, 2] - boxes[:, 0]
+ heights = boxes[:, 3] - boxes[:, 1]
+ ctr_x = boxes[:, 0] + 0.5 * widths
+ ctr_y = boxes[:, 1] + 0.5 * heights
+
+ wx, wy, ww, wh = weights
+ dx = deltas[:, 0::4] / wx
+ dy = deltas[:, 1::4] / wy
+ dw = deltas[:, 2::4] / ww
+ dh = deltas[:, 3::4] / wh
+ # Prevent sending too large values into paddle.exp()
+ dw = paddle.clip(dw, max=clip_scale)
+ dh = paddle.clip(dh, max=clip_scale)
+
+ pred_ctr_x = dx * widths.unsqueeze(1) + ctr_x.unsqueeze(1)
+ pred_ctr_y = dy * heights.unsqueeze(1) + ctr_y.unsqueeze(1)
+ pred_w = paddle.exp(dw) * widths.unsqueeze(1)
+ pred_h = paddle.exp(dh) * heights.unsqueeze(1)
+
+ pred_boxes = []
+ pred_boxes.append(pred_ctr_x - 0.5 * pred_w)
+ pred_boxes.append(pred_ctr_y - 0.5 * pred_h)
+ pred_boxes.append(pred_ctr_x + 0.5 * pred_w)
+ pred_boxes.append(pred_ctr_y + 0.5 * pred_h)
+ pred_boxes = paddle.stack(pred_boxes, axis=-1)
+
+ return pred_boxes
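The two functions are inverse transforms; a round trip with unit weights recovers the target box (output shape is [N, 1, 4] because delta2bbox keeps a per-class axis):

```python
import paddle

src = paddle.to_tensor([[10., 10., 50., 90.]])
tgt = paddle.to_tensor([[12., 8., 60., 100.]])
deltas = bbox2delta(src, tgt, [1.0, 1.0, 1.0, 1.0])
print(delta2bbox(deltas, src, [1.0, 1.0, 1.0, 1.0]).numpy())   # ~[[[12, 8, 60, 100]]]
```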
+
+
+def expand_bbox(bboxes, scale):
+ w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5
+ h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5
+ x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5
+ y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5
+
+ w_half *= scale
+ h_half *= scale
+
+ bboxes_exp = np.zeros(bboxes.shape, dtype=np.float32)
+ bboxes_exp[:, 0] = x_c - w_half
+ bboxes_exp[:, 2] = x_c + w_half
+ bboxes_exp[:, 1] = y_c - h_half
+ bboxes_exp[:, 3] = y_c + h_half
+
+ return bboxes_exp
+
+
+def clip_bbox(boxes, im_shape):
+ h, w = im_shape[0], im_shape[1]
+ x1 = boxes[:, 0].clip(0, w)
+ y1 = boxes[:, 1].clip(0, h)
+ x2 = boxes[:, 2].clip(0, w)
+ y2 = boxes[:, 3].clip(0, h)
+ return paddle.stack([x1, y1, x2, y2], axis=1)
+
+
+def nonempty_bbox(boxes, min_size=0, return_mask=False):
+ w = boxes[:, 2] - boxes[:, 0]
+ h = boxes[:, 3] - boxes[:, 1]
+ mask = paddle.logical_and(h > min_size, w > min_size)
+ if return_mask:
+ return mask
+ keep = paddle.nonzero(mask).flatten()
+ return keep
+
+
+def bbox_area(boxes):
+ return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
+
+
+def bbox_overlaps(boxes1, boxes2):
+ """
+ Calculate overlaps between boxes1 and boxes2
+
+ Args:
+ boxes1 (Tensor): boxes with shape [M, 4]
+ boxes2 (Tensor): boxes with shape [N, 4]
+
+ Return:
+ overlaps (Tensor): overlaps between boxes1 and boxes2 with shape [M, N]
+ """
+ M = boxes1.shape[0]
+ N = boxes2.shape[0]
+ if M * N == 0:
+ return paddle.zeros([M, N], dtype='float32')
+ area1 = bbox_area(boxes1)
+ area2 = bbox_area(boxes2)
+
+ xy_max = paddle.minimum(
+ paddle.unsqueeze(boxes1, 1)[:, :, 2:], boxes2[:, 2:])
+ xy_min = paddle.maximum(
+ paddle.unsqueeze(boxes1, 1)[:, :, :2], boxes2[:, :2])
+ width_height = xy_max - xy_min
+ width_height = width_height.clip(min=0)
+ inter = width_height.prod(axis=2)
+
+ overlaps = paddle.where(inter > 0, inter /
+ (paddle.unsqueeze(area1, 1) + area2 - inter),
+ paddle.zeros_like(inter))
+ return overlaps
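A one-line IoU check for the function above: two unit-area boxes overlapping by half give 0.5 / (1 + 1 - 0.5) = 1/3.

```python
import paddle

a = paddle.to_tensor([[0., 0., 1., 1.]])
b = paddle.to_tensor([[0.5, 0., 1.5, 1.]])
print(bbox_overlaps(a, b).numpy())   # [[0.33333334]]
```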
+
+
+def batch_bbox_overlaps(bboxes1,
+ bboxes2,
+ mode='iou',
+ is_aligned=False,
+ eps=1e-6):
+ """Calculate overlap between two set of bboxes.
+    If ``is_aligned`` is ``False``, then calculate the overlaps between each
+ bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
+ pair of bboxes1 and bboxes2.
+ Args:
+        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
+        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
+            B indicates the batch dim, in shape (B1, B2, ..., Bn).
+            If ``is_aligned`` is ``True``, then m and n must be equal.
+        mode (str): "iou" (intersection over union), "iof" (intersection over
+            foreground) or "giou" (generalized IoU).
+ is_aligned (bool, optional): If True, then m and n must be equal.
+ Default False.
+ eps (float, optional): A value added to the denominator for numerical
+ stability. Default 1e-6.
+ Returns:
+ Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
+ """
+ assert mode in ['iou', 'iof', 'giou'], 'Unsupported mode {}'.format(mode)
+ # Either the boxes are empty or the length of boxes's last dimenstion is 4
+ assert (bboxes1.shape[-1] == 4 or bboxes1.shape[0] == 0)
+ assert (bboxes2.shape[-1] == 4 or bboxes2.shape[0] == 0)
+
+ # Batch dim must be the same
+ # Batch dim: (B1, B2, ... Bn)
+ assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
+ batch_shape = bboxes1.shape[:-2]
+
+ rows = bboxes1.shape[-2] if bboxes1.shape[0] > 0 else 0
+ cols = bboxes2.shape[-2] if bboxes2.shape[0] > 0 else 0
+ if is_aligned:
+ assert rows == cols
+
+    if rows * cols == 0:
+        # Note: bboxes1.shape[:-2] is a Python list in Paddle, so the shape
+        # must be extended with a list rather than a tuple.
+        if is_aligned:
+            return paddle.full(batch_shape + [rows], 1)
+        else:
+            return paddle.full(batch_shape + [rows, cols], 1)
+
+ area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
+ area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
+
+ if is_aligned:
+ lt = paddle.maximum(bboxes1[:, :2], bboxes2[:, :2]) # [B, rows, 2]
+ rb = paddle.minimum(bboxes1[:, 2:], bboxes2[:, 2:]) # [B, rows, 2]
+
+ wh = (rb - lt).clip(min=0) # [B, rows, 2]
+ overlap = wh[:, 0] * wh[:, 1]
+
+ if mode in ['iou', 'giou']:
+ union = area1 + area2 - overlap
+ else:
+ union = area1
+ if mode == 'giou':
+ enclosed_lt = paddle.minimum(bboxes1[:, :2], bboxes2[:, :2])
+ enclosed_rb = paddle.maximum(bboxes1[:, 2:], bboxes2[:, 2:])
+ else:
+ lt = paddle.maximum(bboxes1[:, :2].reshape([rows, 1, 2]),
+ bboxes2[:, :2]) # [B, rows, cols, 2]
+ rb = paddle.minimum(bboxes1[:, 2:].reshape([rows, 1, 2]),
+ bboxes2[:, 2:]) # [B, rows, cols, 2]
+
+ wh = (rb - lt).clip(min=0) # [B, rows, cols, 2]
+ overlap = wh[:, :, 0] * wh[:, :, 1]
+
+ if mode in ['iou', 'giou']:
+ union = area1.reshape([rows,1]) \
+ + area2.reshape([1,cols]) - overlap
+ else:
+ union = area1[:, None]
+ if mode == 'giou':
+ enclosed_lt = paddle.minimum(bboxes1[:, :2].reshape([rows, 1, 2]),
+ bboxes2[:, :2])
+ enclosed_rb = paddle.maximum(bboxes1[:, 2:].reshape([rows, 1, 2]),
+ bboxes2[:, 2:])
+
+ eps = paddle.to_tensor([eps])
+ union = paddle.maximum(union, eps)
+ ious = overlap / union
+ if mode in ['iou', 'iof']:
+ return ious
+ # calculate gious
+ enclose_wh = (enclosed_rb - enclosed_lt).clip(min=0)
+ enclose_area = enclose_wh[:, :, 0] * enclose_wh[:, :, 1]
+ enclose_area = paddle.maximum(enclose_area, eps)
+ gious = ious - (enclose_area - union) / enclose_area
+ return 1 - gious
+
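+# Note (added comment): unlike the 'iou'/'iof' modes, the 'giou' branch above
+# returns 1 - GIoU, i.e. a loss-style value rather than an overlap.
+# A minimal aligned-mode sketch:
+#
+#   b1 = paddle.to_tensor([[0., 0., 10., 10.]])
+#   b2 = paddle.to_tensor([[0., 0., 10., 10.]])
+#   batch_bbox_overlaps(b1, b2, mode='iou', is_aligned=True)  # approx. [1.0]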
+
+def xywh2xyxy(box):
+ x, y, w, h = box
+ x1 = x - w * 0.5
+ y1 = y - h * 0.5
+ x2 = x + w * 0.5
+ y2 = y + h * 0.5
+ return [x1, y1, x2, y2]
+
+
+def make_grid(h, w, dtype):
+ yv, xv = paddle.meshgrid([paddle.arange(h), paddle.arange(w)])
+ return paddle.stack((xv, yv), 2).cast(dtype=dtype)
+
+
+def decode_yolo(box, anchor, downsample_ratio):
+ """decode yolo box
+
+ Args:
+ box (list): [x, y, w, h], all have the shape [b, na, h, w, 1]
+ anchor (list): anchor with the shape [na, 2]
+ downsample_ratio (int): downsample ratio, default 32
+ scale (float): scale, default 1.
+
+ Return:
+ box (list): decoded box, [x, y, w, h], all have the shape [b, na, h, w, 1]
+ """
+ x, y, w, h = box
+ na, grid_h, grid_w = x.shape[1:4]
+ grid = make_grid(grid_h, grid_w, x.dtype).reshape((1, 1, grid_h, grid_w, 2))
+ x1 = (x + grid[:, :, :, :, 0:1]) / grid_w
+ y1 = (y + grid[:, :, :, :, 1:2]) / grid_h
+
+ anchor = paddle.to_tensor(anchor)
+ anchor = paddle.cast(anchor, x.dtype)
+ anchor = anchor.reshape((1, na, 1, 1, 2))
+ w1 = paddle.exp(w) * anchor[:, :, :, :, 0:1] / (downsample_ratio * grid_w)
+ h1 = paddle.exp(h) * anchor[:, :, :, :, 1:2] / (downsample_ratio * grid_h)
+
+ return [x1, y1, w1, h1]
+
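+# Shape-oriented sketch for decode_yolo (added comment): with a 13x13 grid,
+# three anchors and a 32x downsample, raw predictions of shape
+# [b, na, h, w, 1] are mapped to grid-normalized box coordinates.
+#
+#   x = y = w = h = paddle.zeros([1, 3, 13, 13, 1])
+#   anchors = [[10, 13], [16, 30], [33, 23]]
+#   x1, y1, w1, h1 = decode_yolo([x, y, w, h], anchors, downsample_ratio=32)
+#   # x1[0, 0, i, j, 0] == j / 13 and y1[0, 0, i, j, 0] == i / 13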
+
+def iou_similarity(box1, box2, eps=1e-9):
+ """Calculate iou of box1 and box2
+
+ Args:
+ box1 (Tensor): box with the shape [N, M1, 4]
+ box2 (Tensor): box with the shape [N, M2, 4]
+
+ Return:
+ iou (Tensor): iou between box1 and box2 with the shape [N, M1, M2]
+ """
+ box1 = box1.unsqueeze(2) # [N, M1, 4] -> [N, M1, 1, 4]
+ box2 = box2.unsqueeze(1) # [N, M2, 4] -> [N, 1, M2, 4]
+ px1y1, px2y2 = box1[:, :, :, 0:2], box1[:, :, :, 2:4]
+ gx1y1, gx2y2 = box2[:, :, :, 0:2], box2[:, :, :, 2:4]
+ x1y1 = paddle.maximum(px1y1, gx1y1)
+ x2y2 = paddle.minimum(px2y2, gx2y2)
+ overlap = (x2y2 - x1y1).clip(0).prod(-1)
+ area1 = (px2y2 - px1y1).clip(0).prod(-1)
+ area2 = (gx2y2 - gx1y1).clip(0).prod(-1)
+ union = area1 + area2 - overlap + eps
+ return overlap / union
+
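+# Minimal sketch for iou_similarity (added comment): batched pairwise IoU.
+#
+#   b1 = paddle.to_tensor([[[0., 0., 10., 10.]]])        # [N=1, M1=1, 4]
+#   b2 = paddle.to_tensor([[[0., 0., 10., 10.],
+#                           [5., 5., 15., 15.]]])        # [N=1, M2=2, 4]
+#   iou_similarity(b1, b2)  # shape [1, 1, 2], approx. [[[1.0, 0.143]]]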
+
+def bbox_iou(box1, box2, giou=False, diou=False, ciou=False, eps=1e-9):
+ """calculate the iou of box1 and box2
+
+ Args:
+ box1 (list): [x, y, w, h], all have the shape [b, na, h, w, 1]
+ box2 (list): [x, y, w, h], all have the shape [b, na, h, w, 1]
+ giou (bool): whether use giou or not, default False
+ diou (bool): whether use diou or not, default False
+ ciou (bool): whether use ciou or not, default False
+ eps (float): epsilon to avoid divide by zero
+
+ Return:
+        iou (Tensor): iou of box1 and box2, with the shape [b, na, h, w, 1]
+ """
+ px1, py1, px2, py2 = box1
+ gx1, gy1, gx2, gy2 = box2
+ x1 = paddle.maximum(px1, gx1)
+ y1 = paddle.maximum(py1, gy1)
+ x2 = paddle.minimum(px2, gx2)
+ y2 = paddle.minimum(py2, gy2)
+
+ overlap = ((x2 - x1).clip(0)) * ((y2 - y1).clip(0))
+
+ area1 = (px2 - px1) * (py2 - py1)
+ area1 = area1.clip(0)
+
+ area2 = (gx2 - gx1) * (gy2 - gy1)
+ area2 = area2.clip(0)
+
+ union = area1 + area2 - overlap + eps
+ iou = overlap / union
+
+ if giou or ciou or diou:
+ # convex w, h
+ cw = paddle.maximum(px2, gx2) - paddle.minimum(px1, gx1)
+ ch = paddle.maximum(py2, gy2) - paddle.minimum(py1, gy1)
+ if giou:
+ c_area = cw * ch + eps
+ return iou - (c_area - union) / c_area
+ else:
+ # convex diagonal squared
+ c2 = cw**2 + ch**2 + eps
+ # center distance
+ rho2 = ((px1 + px2 - gx1 - gx2)**2 + (py1 + py2 - gy1 - gy2)**2) / 4
+ if diou:
+ return iou - rho2 / c2
+ else:
+ w1, h1 = px2 - px1, py2 - py1 + eps
+ w2, h2 = gx2 - gx1, gy2 - gy1 + eps
+ delta = paddle.atan(w1 / h1) - paddle.atan(w2 / h2)
+ v = (4 / math.pi**2) * paddle.pow(delta, 2)
+ alpha = v / (1 + eps - iou + v)
+ alpha.stop_gradient = True
+ return iou - (rho2 / c2 + v * alpha)
+ else:
+ return iou
+
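+# Reference formulas for the variants above (added comment), with U the union,
+# C the area of the smallest enclosing box, rho the center distance and c the
+# enclosing-box diagonal:
+#   GIoU = IoU - (C - U) / C
+#   DIoU = IoU - rho^2 / c^2
+#   CIoU = IoU - (rho^2 / c^2 + alpha * v),  v penalizing aspect-ratio mismatch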
+
+def rect2rbox(bboxes):
+ """
+ :param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax)
+ :return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle)
+ """
+ bboxes = bboxes.reshape(-1, 4)
+ num_boxes = bboxes.shape[0]
+
+ x_ctr = (bboxes[:, 2] + bboxes[:, 0]) / 2.0
+ y_ctr = (bboxes[:, 3] + bboxes[:, 1]) / 2.0
+ edges1 = np.abs(bboxes[:, 2] - bboxes[:, 0])
+ edges2 = np.abs(bboxes[:, 3] - bboxes[:, 1])
+ angles = np.zeros([num_boxes], dtype=bboxes.dtype)
+
+ inds = edges1 < edges2
+
+ rboxes = np.stack((x_ctr, y_ctr, edges1, edges2, angles), axis=1)
+ rboxes[inds, 2] = edges2[inds]
+ rboxes[inds, 3] = edges1[inds]
+ rboxes[inds, 4] = np.pi / 2.0
+ return rboxes
+
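+# Example (added comment): an axis-aligned numpy box is converted so that the
+# long side always becomes the width, rotating by pi/2 when needed.
+#
+#   rect2rbox(np.array([[0., 0., 10., 4.]]))  # -> [[5., 2., 10., 4., 0.]]
+#   rect2rbox(np.array([[0., 0., 4., 10.]]))  # -> [[2., 5., 10., 4., pi/2]]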
+
+def delta2rbox(rrois,
+ deltas,
+ means=[0, 0, 0, 0, 0],
+ stds=[1, 1, 1, 1, 1],
+ wh_ratio_clip=1e-6):
+ """
+ :param rrois: (cx, cy, w, h, theta)
+ :param deltas: (dx, dy, dw, dh, dtheta)
+ :param means:
+ :param stds:
+ :param wh_ratio_clip:
+ :return:
+ """
+ means = paddle.to_tensor(means)
+ stds = paddle.to_tensor(stds)
+ deltas = paddle.reshape(deltas, [-1, deltas.shape[-1]])
+ denorm_deltas = deltas * stds + means
+
+ dx = denorm_deltas[:, 0]
+ dy = denorm_deltas[:, 1]
+ dw = denorm_deltas[:, 2]
+ dh = denorm_deltas[:, 3]
+ dangle = denorm_deltas[:, 4]
+
+ max_ratio = np.abs(np.log(wh_ratio_clip))
+ dw = paddle.clip(dw, min=-max_ratio, max=max_ratio)
+ dh = paddle.clip(dh, min=-max_ratio, max=max_ratio)
+
+ rroi_x = rrois[:, 0]
+ rroi_y = rrois[:, 1]
+ rroi_w = rrois[:, 2]
+ rroi_h = rrois[:, 3]
+ rroi_angle = rrois[:, 4]
+
+ gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin(
+ rroi_angle) + rroi_x
+ gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos(
+ rroi_angle) + rroi_y
+ gw = rroi_w * dw.exp()
+ gh = rroi_h * dh.exp()
+ ga = np.pi * dangle + rroi_angle
+ ga = (ga + np.pi / 4) % np.pi - np.pi / 4
+ ga = paddle.to_tensor(ga)
+
+ gw = paddle.to_tensor(gw, dtype='float32')
+ gh = paddle.to_tensor(gh, dtype='float32')
+ bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=-1)
+ return bboxes
+
+
+def rbox2delta(proposals, gt, means=[0, 0, 0, 0, 0], stds=[1, 1, 1, 1, 1]):
+ """
+
+ Args:
+ proposals:
+ gt:
+ means: 1x5
+ stds: 1x5
+
+ Returns:
+
+ """
+ proposals = proposals.astype(np.float64)
+
+ PI = np.pi
+
+ gt_widths = gt[..., 2]
+ gt_heights = gt[..., 3]
+ gt_angle = gt[..., 4]
+
+ proposals_widths = proposals[..., 2]
+ proposals_heights = proposals[..., 3]
+ proposals_angle = proposals[..., 4]
+
+ coord = gt[..., 0:2] - proposals[..., 0:2]
+ dx = (np.cos(proposals[..., 4]) * coord[..., 0] + np.sin(proposals[..., 4])
+ * coord[..., 1]) / proposals_widths
+ dy = (-np.sin(proposals[..., 4]) * coord[..., 0] + np.cos(proposals[..., 4])
+ * coord[..., 1]) / proposals_heights
+ dw = np.log(gt_widths / proposals_widths)
+ dh = np.log(gt_heights / proposals_heights)
+ da = (gt_angle - proposals_angle)
+
+ da = (da + PI / 4) % PI - PI / 4
+ da /= PI
+
+ deltas = np.stack([dx, dy, dw, dh, da], axis=-1)
+ means = np.array(means, dtype=deltas.dtype)
+ stds = np.array(stds, dtype=deltas.dtype)
+ deltas = (deltas - means) / stds
+ deltas = deltas.astype(np.float32)
+ return deltas
+
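+# Sanity-check sketch (added comment): when gt equals the proposals and the
+# default means/stds are used, rbox2delta yields all-zero targets; delta2rbox
+# then maps zero deltas back to the original rotated boxes.
+#
+#   boxes = np.array([[5., 5., 10., 4., 0.]])
+#   rbox2delta(boxes, boxes)  # -> [[0., 0., 0., 0., 0.]]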
+
+def bbox_decode(bbox_preds,
+ anchors,
+ means=[0, 0, 0, 0, 0],
+ stds=[1, 1, 1, 1, 1]):
+ """decode bbox from deltas
+ Args:
+ bbox_preds: [N,H,W,5]
+ anchors: [H*W,5]
+ return:
+ bboxes: [N,H,W,5]
+ """
+ means = paddle.to_tensor(means)
+ stds = paddle.to_tensor(stds)
+ num_imgs, H, W, _ = bbox_preds.shape
+ bboxes_list = []
+ for img_id in range(num_imgs):
+ bbox_pred = bbox_preds[img_id]
+        # bbox_pred.shape=[H,W,5]
+ bbox_delta = bbox_pred
+ anchors = paddle.to_tensor(anchors)
+ bboxes = delta2rbox(
+ anchors, bbox_delta, means, stds, wh_ratio_clip=1e-6)
+ bboxes = paddle.reshape(bboxes, [H, W, 5])
+ bboxes_list.append(bboxes)
+ return paddle.stack(bboxes_list, axis=0)
+
+
+def poly2rbox(polys):
+ """
+ poly:[x0,y0,x1,y1,x2,y2,x3,y3]
+ to
+ rotated_boxes:[x_ctr,y_ctr,w,h,angle]
+ """
+ rotated_boxes = []
+ for poly in polys:
+ poly = np.array(poly[:8], dtype=np.float32)
+
+ pt1 = (poly[0], poly[1])
+ pt2 = (poly[2], poly[3])
+ pt3 = (poly[4], poly[5])
+ pt4 = (poly[6], poly[7])
+
+ edge1 = np.sqrt((pt1[0] - pt2[0]) * (pt1[0] - pt2[0]) + (pt1[1] - pt2[
+ 1]) * (pt1[1] - pt2[1]))
+ edge2 = np.sqrt((pt2[0] - pt3[0]) * (pt2[0] - pt3[0]) + (pt2[1] - pt3[
+ 1]) * (pt2[1] - pt3[1]))
+
+ width = max(edge1, edge2)
+ height = min(edge1, edge2)
+
+ rbox_angle = 0
+ if edge1 > edge2:
+ rbox_angle = np.arctan2(
+ float(pt2[1] - pt1[1]), float(pt2[0] - pt1[0]))
+ elif edge2 >= edge1:
+ rbox_angle = np.arctan2(
+ float(pt4[1] - pt1[1]), float(pt4[0] - pt1[0]))
+
+ def norm_angle(angle, range=[-np.pi / 4, np.pi]):
+ return (angle - range[0]) % range[1] + range[0]
+
+ rbox_angle = norm_angle(rbox_angle)
+
+ x_ctr = float(pt1[0] + pt3[0]) / 2
+ y_ctr = float(pt1[1] + pt3[1]) / 2
+ rotated_box = np.array([x_ctr, y_ctr, width, height, rbox_angle])
+ rotated_boxes.append(rotated_box)
+ ret_rotated_boxes = np.array(rotated_boxes)
+ assert ret_rotated_boxes.shape[1] == 5
+ return ret_rotated_boxes
+
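+# Example (added comment): an axis-aligned 10x10 square polygon maps to a
+# rotated box under this angle convention.
+#
+#   poly2rbox([[0., 0., 10., 0., 10., 10., 0., 10.]])
+#   # -> [[5., 5., 10., 10., pi/2]] (equal edges take the pt1->pt4 angle)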
+
+def cal_line_length(point1, point2):
+ return math.sqrt(
+ math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2))
+
+
+def get_best_begin_point_single(coordinate):
+ x1, y1, x2, y2, x3, y3, x4, y4 = coordinate
+ xmin = min(x1, x2, x3, x4)
+ ymin = min(y1, y2, y3, y4)
+ xmax = max(x1, x2, x3, x4)
+ ymax = max(y1, y2, y3, y4)
+ combinate = [[[x1, y1], [x2, y2], [x3, y3], [x4, y4]],
+ [[x4, y4], [x1, y1], [x2, y2], [x3, y3]],
+ [[x3, y3], [x4, y4], [x1, y1], [x2, y2]],
+ [[x2, y2], [x3, y3], [x4, y4], [x1, y1]]]
+ dst_coordinate = [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]
+ force = 100000000.0
+ force_flag = 0
+ for i in range(4):
+ temp_force = cal_line_length(combinate[i][0], dst_coordinate[0]) \
+ + cal_line_length(combinate[i][1], dst_coordinate[1]) \
+ + cal_line_length(combinate[i][2], dst_coordinate[2]) \
+ + cal_line_length(combinate[i][3], dst_coordinate[3])
+ if temp_force < force:
+ force = temp_force
+ force_flag = i
+ return np.array(combinate[force_flag]).reshape(8)
+
+
+def rbox2poly_np(rrects):
+ """
+ rrect:[x_ctr,y_ctr,w,h,angle]
+ to
+ poly:[x0,y0,x1,y1,x2,y2,x3,y3]
+ """
+ polys = []
+ for i in range(rrects.shape[0]):
+ rrect = rrects[i]
+ # x_ctr, y_ctr, width, height, angle = rrect[:5]
+ x_ctr = rrect[0]
+ y_ctr = rrect[1]
+ width = rrect[2]
+ height = rrect[3]
+ angle = rrect[4]
+ tl_x, tl_y, br_x, br_y = -width / 2, -height / 2, width / 2, height / 2
+ rect = np.array([[tl_x, br_x, br_x, tl_x], [tl_y, tl_y, br_y, br_y]])
+ R = np.array([[np.cos(angle), -np.sin(angle)],
+ [np.sin(angle), np.cos(angle)]])
+ poly = R.dot(rect)
+ x0, x1, x2, x3 = poly[0, :4] + x_ctr
+ y0, y1, y2, y3 = poly[1, :4] + y_ctr
+ poly = np.array([x0, y0, x1, y1, x2, y2, x3, y3], dtype=np.float32)
+ poly = get_best_begin_point_single(poly)
+ polys.append(poly)
+ polys = np.array(polys)
+ return polys
+
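+# Example (added comment): a zero-angle rotated box is expanded back to its
+# four corners, re-ordered to start closest to (xmin, ymin).
+#
+#   rbox2poly_np(np.array([[5., 5., 10., 4., 0.]]))
+#   # -> [[0., 3., 10., 3., 10., 7., 0., 7.]]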
+
+def rbox2poly(rrects):
+ """
+ rrect:[x_ctr,y_ctr,w,h,angle]
+ to
+ poly:[x0,y0,x1,y1,x2,y2,x3,y3]
+ """
+ N = paddle.shape(rrects)[0]
+
+ x_ctr = rrects[:, 0]
+ y_ctr = rrects[:, 1]
+ width = rrects[:, 2]
+ height = rrects[:, 3]
+ angle = rrects[:, 4]
+
+ tl_x, tl_y, br_x, br_y = -width * 0.5, -height * 0.5, width * 0.5, height * 0.5
+
+ normal_rects = paddle.stack(
+ [tl_x, br_x, br_x, tl_x, tl_y, tl_y, br_y, br_y], axis=0)
+ normal_rects = paddle.reshape(normal_rects, [2, 4, N])
+ normal_rects = paddle.transpose(normal_rects, [2, 0, 1])
+
+ sin, cos = paddle.sin(angle), paddle.cos(angle)
+ # M.shape=[N,2,2]
+ M = paddle.stack([cos, -sin, sin, cos], axis=0)
+ M = paddle.reshape(M, [2, 2, N])
+ M = paddle.transpose(M, [2, 0, 1])
+
+ # polys:[N,8]
+ polys = paddle.matmul(M, normal_rects)
+ polys = paddle.transpose(polys, [2, 1, 0])
+ polys = paddle.reshape(polys, [-1, N])
+ polys = paddle.transpose(polys, [1, 0])
+
+ tmp = paddle.stack(
+ [x_ctr, y_ctr, x_ctr, y_ctr, x_ctr, y_ctr, x_ctr, y_ctr], axis=1)
+ polys = polys + tmp
+ return polys
+
+
+def bbox_iou_np_expand(box1, box2, x1y1x2y2=True, eps=1e-16):
+ """
+ Calculate the iou of box1 and box2 with numpy.
+
+ Args:
+ box1 (ndarray): [N, 4]
+ box2 (ndarray): [M, 4], usually N != M
+        x1y1x2y2 (bool): whether boxes are in x1y1x2y2 style, default True
+ eps (float): epsilon to avoid divide by zero
+ Return:
+ iou (ndarray): iou of box1 and box2, [N, M]
+ """
+ N, M = len(box1), len(box2) # usually N != M
+ if x1y1x2y2:
+ b1_x1, b1_y1 = box1[:, 0], box1[:, 1]
+ b1_x2, b1_y2 = box1[:, 2], box1[:, 3]
+ b2_x1, b2_y1 = box2[:, 0], box2[:, 1]
+ b2_x2, b2_y2 = box2[:, 2], box2[:, 3]
+ else:
+ # cxcywh style
+ # Transform from center and width to exact coordinates
+ b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
+ b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
+ b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
+ b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
+
+ # get the coordinates of the intersection rectangle
+ inter_rect_x1 = np.zeros((N, M), dtype=np.float32)
+ inter_rect_y1 = np.zeros((N, M), dtype=np.float32)
+ inter_rect_x2 = np.zeros((N, M), dtype=np.float32)
+ inter_rect_y2 = np.zeros((N, M), dtype=np.float32)
+ for i in range(len(box2)):
+ inter_rect_x1[:, i] = np.maximum(b1_x1, b2_x1[i])
+ inter_rect_y1[:, i] = np.maximum(b1_y1, b2_y1[i])
+ inter_rect_x2[:, i] = np.minimum(b1_x2, b2_x2[i])
+ inter_rect_y2[:, i] = np.minimum(b1_y2, b2_y2[i])
+ # Intersection area
+ inter_area = np.maximum(inter_rect_x2 - inter_rect_x1, 0) * np.maximum(
+ inter_rect_y2 - inter_rect_y1, 0)
+ # Union Area
+ b1_area = np.repeat(
+ ((b1_x2 - b1_x1) * (b1_y2 - b1_y1)).reshape(-1, 1), M, axis=-1)
+ b2_area = np.repeat(
+ ((b2_x2 - b2_x1) * (b2_y2 - b2_y1)).reshape(1, -1), N, axis=0)
+
+ ious = inter_area / (b1_area + b2_area - inter_area + eps)
+ return ious
+
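+# Example (added comment): the numpy variant mirrors bbox_overlaps above.
+#
+#   b1 = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]], dtype=np.float32)
+#   b2 = np.array([[0., 0., 10., 10.]], dtype=np.float32)
+#   bbox_iou_np_expand(b1, b2)  # approx. [[1.0], [0.143]]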
+
+def bbox2distance(points, bbox, max_dis=None, eps=0.1):
+ """Decode bounding box based on distances.
+ Args:
+ points (Tensor): Shape (n, 2), [x, y].
+ bbox (Tensor): Shape (n, 4), "xyxy" format
+ max_dis (float): Upper bound of the distance.
+ eps (float): a small value to ensure target < max_dis, instead <=
+ Returns:
+ Tensor: Decoded distances.
+ """
+ left = points[:, 0] - bbox[:, 0]
+ top = points[:, 1] - bbox[:, 1]
+ right = bbox[:, 2] - points[:, 0]
+ bottom = bbox[:, 3] - points[:, 1]
+ if max_dis is not None:
+ left = left.clip(min=0, max=max_dis - eps)
+ top = top.clip(min=0, max=max_dis - eps)
+ right = right.clip(min=0, max=max_dis - eps)
+ bottom = bottom.clip(min=0, max=max_dis - eps)
+ return paddle.stack([left, top, right, bottom], -1)
+
+
+def distance2bbox(points, distance, max_shape=None):
+ """Decode distance prediction to bounding box.
+ Args:
+ points (Tensor): Shape (n, 2), [x, y].
+ distance (Tensor): Distance from the given point to 4
+ boundaries (left, top, right, bottom).
+ max_shape (tuple): Shape of the image.
+ Returns:
+ Tensor: Decoded bboxes.
+ """
+ x1 = points[:, 0] - distance[:, 0]
+ y1 = points[:, 1] - distance[:, 1]
+ x2 = points[:, 0] + distance[:, 2]
+ y2 = points[:, 1] + distance[:, 3]
+ if max_shape is not None:
+ x1 = x1.clip(min=0, max=max_shape[1])
+ y1 = y1.clip(min=0, max=max_shape[0])
+ x2 = x2.clip(min=0, max=max_shape[1])
+ y2 = y2.clip(min=0, max=max_shape[0])
+ return paddle.stack([x1, y1, x2, y2], -1)
+
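+# Round-trip sketch (added comment): bbox2distance and distance2bbox are
+# inverses of each other when no clipping applies.
+#
+#   pts = paddle.to_tensor([[5., 5.]])
+#   box = paddle.to_tensor([[0., 0., 10., 10.]])
+#   dist = bbox2distance(pts, box)  # [[5., 5., 5., 5.]]
+#   distance2bbox(pts, dist)        # [[0., 0., 10., 10.]]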
+
+def bbox_center(boxes):
+ """Get bbox centers from boxes.
+ Args:
+ boxes (Tensor): boxes with shape (N, 4), "xmin, ymin, xmax, ymax" format.
+ Returns:
+ Tensor: boxes centers with shape (N, 2), "cx, cy" format.
+ """
+ boxes_cx = (boxes[:, 0] + boxes[:, 2]) / 2
+ boxes_cy = (boxes[:, 1] + boxes[:, 3]) / 2
+ return paddle.stack([boxes_cx, boxes_cy], axis=-1)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__init__.py
new file mode 100644
index 000000000..b6b928608
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__init__.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import bbox_head
+from . import mask_head
+from . import yolo_head
+from . import roi_extractor
+from . import ssd_head
+from . import fcos_head
+from . import solov2_head
+from . import ttf_head
+from . import cascade_head
+from . import face_head
+from . import s2anet_head
+from . import keypoint_hrhrnet_head
+from . import centernet_head
+from . import gfl_head
+from . import simota_head
+from . import pico_head
+from . import detr_head
+from . import sparsercnn_head
+from . import tood_head
+
+from .bbox_head import *
+from .mask_head import *
+from .yolo_head import *
+from .roi_extractor import *
+from .ssd_head import *
+from .fcos_head import *
+from .solov2_head import *
+from .ttf_head import *
+from .cascade_head import *
+from .face_head import *
+from .s2anet_head import *
+from .keypoint_hrhrnet_head import *
+from .centernet_head import *
+from .gfl_head import *
+from .simota_head import *
+from .pico_head import *
+from .detr_head import *
+from .sparsercnn_head import *
+from .tood_head import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..9813bf8c0
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/bbox_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/bbox_head.cpython-37.pyc
new file mode 100644
index 000000000..6e35d8b73
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/bbox_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/cascade_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/cascade_head.cpython-37.pyc
new file mode 100644
index 000000000..fc1fc24f8
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/cascade_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/centernet_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/centernet_head.cpython-37.pyc
new file mode 100644
index 000000000..78dfbeb45
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/centernet_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/detr_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/detr_head.cpython-37.pyc
new file mode 100644
index 000000000..d72d46bc6
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/detr_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/face_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/face_head.cpython-37.pyc
new file mode 100644
index 000000000..c77948da9
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/face_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/fcos_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/fcos_head.cpython-37.pyc
new file mode 100644
index 000000000..b2efd5f47
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/fcos_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/gfl_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/gfl_head.cpython-37.pyc
new file mode 100644
index 000000000..2478604a1
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/gfl_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/keypoint_hrhrnet_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/keypoint_hrhrnet_head.cpython-37.pyc
new file mode 100644
index 000000000..fff6a345f
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/keypoint_hrhrnet_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/mask_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/mask_head.cpython-37.pyc
new file mode 100644
index 000000000..0dce48ccb
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/mask_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/pico_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/pico_head.cpython-37.pyc
new file mode 100644
index 000000000..ef672f171
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/pico_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/roi_extractor.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/roi_extractor.cpython-37.pyc
new file mode 100644
index 000000000..f432d300c
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/roi_extractor.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/s2anet_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/s2anet_head.cpython-37.pyc
new file mode 100644
index 000000000..3cf65d2bd
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/s2anet_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/simota_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/simota_head.cpython-37.pyc
new file mode 100644
index 000000000..302f42806
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/simota_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/solov2_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/solov2_head.cpython-37.pyc
new file mode 100644
index 000000000..ed7b79f62
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/solov2_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/sparsercnn_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/sparsercnn_head.cpython-37.pyc
new file mode 100644
index 000000000..ab0357c75
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/sparsercnn_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/ssd_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/ssd_head.cpython-37.pyc
new file mode 100644
index 000000000..8d47ca572
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/ssd_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/tood_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/tood_head.cpython-37.pyc
new file mode 100644
index 000000000..4fdc6a1d0
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/tood_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/ttf_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/ttf_head.cpython-37.pyc
new file mode 100644
index 000000000..bc43a5621
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/ttf_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/yolo_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/yolo_head.cpython-37.pyc
new file mode 100644
index 000000000..6e73b0ef0
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/__pycache__/yolo_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/bbox_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/bbox_head.py
new file mode 100644
index 000000000..e4d7d6878
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/bbox_head.py
@@ -0,0 +1,376 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import Normal, XavierUniform, KaimingNormal
+from paddle.regularizer import L2Decay
+
+from ppdet.core.workspace import register, create
+from .roi_extractor import RoIAlign
+from ..shape_spec import ShapeSpec
+from ..bbox_utils import bbox2delta
+from ppdet.modeling.layers import ConvNormLayer
+
+__all__ = ['TwoFCHead', 'XConvNormHead', 'BBoxHead']
+
+
+@register
+class TwoFCHead(nn.Layer):
+ """
+    RCNN bbox head with two FC layers to extract features
+
+ Args:
+ in_channel (int): Input channel which can be derived by from_config
+ out_channel (int): Output channel
+ resolution (int): Resolution of input feature map, default 7
+ """
+
+ def __init__(self, in_channel=256, out_channel=1024, resolution=7):
+ super(TwoFCHead, self).__init__()
+ self.in_channel = in_channel
+ self.out_channel = out_channel
+ fan = in_channel * resolution * resolution
+ self.fc6 = nn.Linear(
+ in_channel * resolution * resolution,
+ out_channel,
+ weight_attr=paddle.ParamAttr(
+ initializer=XavierUniform(fan_out=fan)))
+ self.fc6.skip_quant = True
+
+ self.fc7 = nn.Linear(
+ out_channel,
+ out_channel,
+ weight_attr=paddle.ParamAttr(initializer=XavierUniform()))
+ self.fc7.skip_quant = True
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ s = input_shape
+ s = s[0] if isinstance(s, (list, tuple)) else s
+ return {'in_channel': s.channels}
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=self.out_channel, )]
+
+ def forward(self, rois_feat):
+ rois_feat = paddle.flatten(rois_feat, start_axis=1, stop_axis=-1)
+ fc6 = self.fc6(rois_feat)
+ fc6 = F.relu(fc6)
+ fc7 = self.fc7(fc6)
+ fc7 = F.relu(fc7)
+ return fc7
+
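+# Minimal usage sketch (added comment, not part of the original file):
+# 7x7 RoI features are flattened and passed through fc6/fc7.
+#
+#   head = TwoFCHead(in_channel=256, out_channel=1024, resolution=7)
+#   feat = head(paddle.randn([8, 256, 7, 7]))  # -> shape [8, 1024]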
+
+@register
+class XConvNormHead(nn.Layer):
+ __shared__ = ['norm_type', 'freeze_norm']
+ """
+    RCNN bbox head with several convolution layers
+
+ Args:
+ in_channel (int): Input channels which can be derived by from_config
+ num_convs (int): The number of conv layers
+ conv_dim (int): The number of channels for the conv layers
+ out_channel (int): Output channels
+ resolution (int): Resolution of input feature map
+ norm_type (string): Norm type, bn, gn, sync_bn are available,
+ default `gn`
+ freeze_norm (bool): Whether to freeze the norm
+ stage_name (string): Prefix name for conv layer, '' by default
+ """
+
+ def __init__(self,
+ in_channel=256,
+ num_convs=4,
+ conv_dim=256,
+ out_channel=1024,
+ resolution=7,
+ norm_type='gn',
+ freeze_norm=False,
+ stage_name=''):
+ super(XConvNormHead, self).__init__()
+ self.in_channel = in_channel
+ self.num_convs = num_convs
+ self.conv_dim = conv_dim
+ self.out_channel = out_channel
+ self.norm_type = norm_type
+ self.freeze_norm = freeze_norm
+
+ self.bbox_head_convs = []
+ fan = conv_dim * 3 * 3
+ initializer = KaimingNormal(fan_in=fan)
+ for i in range(self.num_convs):
+ in_c = in_channel if i == 0 else conv_dim
+ head_conv_name = stage_name + 'bbox_head_conv{}'.format(i)
+ head_conv = self.add_sublayer(
+ head_conv_name,
+ ConvNormLayer(
+ ch_in=in_c,
+ ch_out=conv_dim,
+ filter_size=3,
+ stride=1,
+ norm_type=self.norm_type,
+ freeze_norm=self.freeze_norm,
+ initializer=initializer))
+ self.bbox_head_convs.append(head_conv)
+
+ fan = conv_dim * resolution * resolution
+ self.fc6 = nn.Linear(
+ conv_dim * resolution * resolution,
+ out_channel,
+ weight_attr=paddle.ParamAttr(
+ initializer=XavierUniform(fan_out=fan)),
+ bias_attr=paddle.ParamAttr(
+ learning_rate=2., regularizer=L2Decay(0.)))
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ s = input_shape
+ s = s[0] if isinstance(s, (list, tuple)) else s
+ return {'in_channel': s.channels}
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=self.out_channel, )]
+
+ def forward(self, rois_feat):
+ for i in range(self.num_convs):
+ rois_feat = F.relu(self.bbox_head_convs[i](rois_feat))
+ rois_feat = paddle.flatten(rois_feat, start_axis=1, stop_axis=-1)
+ fc6 = F.relu(self.fc6(rois_feat))
+ return fc6
+
+
+@register
+class BBoxHead(nn.Layer):
+ __shared__ = ['num_classes']
+ __inject__ = ['bbox_assigner', 'bbox_loss']
+ """
+ RCNN bbox head
+
+ Args:
+ head (nn.Layer): Extract feature in bbox head
+ in_channel (int): Input channel after RoI extractor
+ roi_extractor (object): The module of RoI Extractor
+ bbox_assigner (object): The module of Box Assigner, label and sample the
+ box.
+ with_pool (bool): Whether to use pooling for the RoI feature.
+ num_classes (int): The number of classes
+ bbox_weight (List[float]): The weight to get the decode box
+ """
+
+ def __init__(self,
+ head,
+ in_channel,
+ roi_extractor=RoIAlign().__dict__,
+ bbox_assigner='BboxAssigner',
+ with_pool=False,
+ num_classes=80,
+ bbox_weight=[10., 10., 5., 5.],
+ bbox_loss=None):
+ super(BBoxHead, self).__init__()
+ self.head = head
+ self.roi_extractor = roi_extractor
+ if isinstance(roi_extractor, dict):
+ self.roi_extractor = RoIAlign(**roi_extractor)
+ self.bbox_assigner = bbox_assigner
+
+ self.with_pool = with_pool
+ self.num_classes = num_classes
+ self.bbox_weight = bbox_weight
+ self.bbox_loss = bbox_loss
+
+ self.bbox_score = nn.Linear(
+ in_channel,
+ self.num_classes + 1,
+ weight_attr=paddle.ParamAttr(initializer=Normal(
+ mean=0.0, std=0.01)))
+ self.bbox_score.skip_quant = True
+
+ self.bbox_delta = nn.Linear(
+ in_channel,
+ 4 * self.num_classes,
+ weight_attr=paddle.ParamAttr(initializer=Normal(
+ mean=0.0, std=0.001)))
+ self.bbox_delta.skip_quant = True
+ self.assigned_label = None
+ self.assigned_rois = None
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ roi_pooler = cfg['roi_extractor']
+ assert isinstance(roi_pooler, dict)
+ kwargs = RoIAlign.from_config(cfg, input_shape)
+ roi_pooler.update(kwargs)
+ kwargs = {'input_shape': input_shape}
+ head = create(cfg['head'], **kwargs)
+ return {
+ 'roi_extractor': roi_pooler,
+ 'head': head,
+ 'in_channel': head.out_shape[0].channels
+ }
+
+ def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None):
+ """
+ body_feats (list[Tensor]): Feature maps from backbone
+ rois (list[Tensor]): RoIs generated from RPN module
+ rois_num (Tensor): The number of RoIs in each image
+ inputs (dict{Tensor}): The ground-truth of image
+ """
+ if self.training:
+ rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs)
+ self.assigned_rois = (rois, rois_num)
+ self.assigned_targets = targets
+
+ rois_feat = self.roi_extractor(body_feats, rois, rois_num)
+ bbox_feat = self.head(rois_feat)
+ if self.with_pool:
+ feat = F.adaptive_avg_pool2d(bbox_feat, output_size=1)
+ feat = paddle.squeeze(feat, axis=[2, 3])
+ else:
+ feat = bbox_feat
+ scores = self.bbox_score(feat)
+ deltas = self.bbox_delta(feat)
+
+ if self.training:
+ loss = self.get_loss(scores, deltas, targets, rois,
+ self.bbox_weight)
+ return loss, bbox_feat
+ else:
+ pred = self.get_prediction(scores, deltas)
+ return pred, self.head
+
+ def get_loss(self, scores, deltas, targets, rois, bbox_weight):
+ """
+ scores (Tensor): scores from bbox head outputs
+ deltas (Tensor): deltas from bbox head outputs
+ targets (list[List[Tensor]]): bbox targets containing tgt_labels, tgt_bboxes and tgt_gt_inds
+ rois (List[Tensor]): RoIs generated in each batch
+ """
+ cls_name = 'loss_bbox_cls'
+ reg_name = 'loss_bbox_reg'
+ loss_bbox = {}
+
+ # TODO: better pass args
+ tgt_labels, tgt_bboxes, tgt_gt_inds = targets
+
+ # bbox cls
+ tgt_labels = paddle.concat(tgt_labels) if len(
+ tgt_labels) > 1 else tgt_labels[0]
+ valid_inds = paddle.nonzero(tgt_labels >= 0).flatten()
+ if valid_inds.shape[0] == 0:
+ loss_bbox[cls_name] = paddle.zeros([1], dtype='float32')
+ else:
+ tgt_labels = tgt_labels.cast('int64')
+ tgt_labels.stop_gradient = True
+ loss_bbox_cls = F.cross_entropy(
+ input=scores, label=tgt_labels, reduction='mean')
+ loss_bbox[cls_name] = loss_bbox_cls
+
+ # bbox reg
+
+ cls_agnostic_bbox_reg = deltas.shape[1] == 4
+
+ fg_inds = paddle.nonzero(
+ paddle.logical_and(tgt_labels >= 0, tgt_labels <
+ self.num_classes)).flatten()
+
+ if fg_inds.numel() == 0:
+ loss_bbox[reg_name] = paddle.zeros([1], dtype='float32')
+ return loss_bbox
+
+ if cls_agnostic_bbox_reg:
+ reg_delta = paddle.gather(deltas, fg_inds)
+ else:
+ fg_gt_classes = paddle.gather(tgt_labels, fg_inds)
+
+ reg_row_inds = paddle.arange(fg_gt_classes.shape[0]).unsqueeze(1)
+ reg_row_inds = paddle.tile(reg_row_inds, [1, 4]).reshape([-1, 1])
+
+ reg_col_inds = 4 * fg_gt_classes.unsqueeze(1) + paddle.arange(4)
+
+ reg_col_inds = reg_col_inds.reshape([-1, 1])
+ reg_inds = paddle.concat([reg_row_inds, reg_col_inds], axis=1)
+
+ reg_delta = paddle.gather(deltas, fg_inds)
+ reg_delta = paddle.gather_nd(reg_delta, reg_inds).reshape([-1, 4])
+ rois = paddle.concat(rois) if len(rois) > 1 else rois[0]
+ tgt_bboxes = paddle.concat(tgt_bboxes) if len(
+ tgt_bboxes) > 1 else tgt_bboxes[0]
+
+ reg_target = bbox2delta(rois, tgt_bboxes, bbox_weight)
+ reg_target = paddle.gather(reg_target, fg_inds)
+ reg_target.stop_gradient = True
+
+ if self.bbox_loss is not None:
+ reg_delta = self.bbox_transform(reg_delta)
+ reg_target = self.bbox_transform(reg_target)
+ loss_bbox_reg = self.bbox_loss(
+ reg_delta, reg_target).sum() / tgt_labels.shape[0]
+ loss_bbox_reg *= self.num_classes
+ else:
+ loss_bbox_reg = paddle.abs(reg_delta - reg_target).sum(
+ ) / tgt_labels.shape[0]
+
+ loss_bbox[reg_name] = loss_bbox_reg
+
+ return loss_bbox
+
+ def bbox_transform(self, deltas, weights=[0.1, 0.1, 0.2, 0.2]):
+ wx, wy, ww, wh = weights
+
+ deltas = paddle.reshape(deltas, shape=(0, -1, 4))
+
+ dx = paddle.slice(deltas, axes=[2], starts=[0], ends=[1]) * wx
+ dy = paddle.slice(deltas, axes=[2], starts=[1], ends=[2]) * wy
+ dw = paddle.slice(deltas, axes=[2], starts=[2], ends=[3]) * ww
+ dh = paddle.slice(deltas, axes=[2], starts=[3], ends=[4]) * wh
+
+ dw = paddle.clip(dw, -1.e10, np.log(1000. / 16))
+ dh = paddle.clip(dh, -1.e10, np.log(1000. / 16))
+
+ pred_ctr_x = dx
+ pred_ctr_y = dy
+ pred_w = paddle.exp(dw)
+ pred_h = paddle.exp(dh)
+
+ x1 = pred_ctr_x - 0.5 * pred_w
+ y1 = pred_ctr_y - 0.5 * pred_h
+ x2 = pred_ctr_x + 0.5 * pred_w
+ y2 = pred_ctr_y + 0.5 * pred_h
+
+ x1 = paddle.reshape(x1, shape=(-1, ))
+ y1 = paddle.reshape(y1, shape=(-1, ))
+ x2 = paddle.reshape(x2, shape=(-1, ))
+ y2 = paddle.reshape(y2, shape=(-1, ))
+
+ return paddle.concat([x1, y1, x2, y2])
+
+ def get_prediction(self, score, delta):
+ bbox_prob = F.softmax(score)
+ return delta, bbox_prob
+
+ def get_head(self, ):
+ return self.head
+
+ def get_assigned_targets(self, ):
+ return self.assigned_targets
+
+ def get_assigned_rois(self, ):
+ return self.assigned_rois
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/cascade_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/cascade_head.py
new file mode 100644
index 000000000..935642bd6
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/cascade_head.py
@@ -0,0 +1,283 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import Normal
+
+from ppdet.core.workspace import register
+from .bbox_head import BBoxHead, TwoFCHead, XConvNormHead
+from .roi_extractor import RoIAlign
+from ..shape_spec import ShapeSpec
+from ..bbox_utils import delta2bbox, clip_bbox, nonempty_bbox
+
+__all__ = ['CascadeTwoFCHead', 'CascadeXConvNormHead', 'CascadeHead']
+
+
+@register
+class CascadeTwoFCHead(nn.Layer):
+ __shared__ = ['num_cascade_stage']
+ """
+    Cascade RCNN bbox head with two FC layers to extract features
+
+ Args:
+ in_channel (int): Input channel which can be derived by from_config
+ out_channel (int): Output channel
+ resolution (int): Resolution of input feature map, default 7
+ num_cascade_stage (int): The number of cascade stage, default 3
+ """
+
+ def __init__(self,
+ in_channel=256,
+ out_channel=1024,
+ resolution=7,
+ num_cascade_stage=3):
+ super(CascadeTwoFCHead, self).__init__()
+
+ self.in_channel = in_channel
+ self.out_channel = out_channel
+
+ self.head_list = []
+ for stage in range(num_cascade_stage):
+ head_per_stage = self.add_sublayer(
+ str(stage), TwoFCHead(in_channel, out_channel, resolution))
+ self.head_list.append(head_per_stage)
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ s = input_shape
+ s = s[0] if isinstance(s, (list, tuple)) else s
+ return {'in_channel': s.channels}
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=self.out_channel, )]
+
+ def forward(self, rois_feat, stage=0):
+ out = self.head_list[stage](rois_feat)
+ return out
+
+
+@register
+class CascadeXConvNormHead(nn.Layer):
+ __shared__ = ['norm_type', 'freeze_norm', 'num_cascade_stage']
+ """
+    Cascade RCNN bbox head with several convolution layers
+
+ Args:
+ in_channel (int): Input channels which can be derived by from_config
+ num_convs (int): The number of conv layers
+ conv_dim (int): The number of channels for the conv layers
+ out_channel (int): Output channels
+ resolution (int): Resolution of input feature map
+ norm_type (string): Norm type, bn, gn, sync_bn are available,
+ default `gn`
+ freeze_norm (bool): Whether to freeze the norm
+ num_cascade_stage (int): The number of cascade stage, default 3
+ """
+
+ def __init__(self,
+ in_channel=256,
+ num_convs=4,
+ conv_dim=256,
+ out_channel=1024,
+ resolution=7,
+ norm_type='gn',
+ freeze_norm=False,
+ num_cascade_stage=3):
+ super(CascadeXConvNormHead, self).__init__()
+ self.in_channel = in_channel
+ self.out_channel = out_channel
+
+ self.head_list = []
+ for stage in range(num_cascade_stage):
+ head_per_stage = self.add_sublayer(
+ str(stage),
+ XConvNormHead(
+ in_channel,
+ num_convs,
+ conv_dim,
+ out_channel,
+ resolution,
+ norm_type,
+ freeze_norm,
+ stage_name='stage{}_'.format(stage)))
+ self.head_list.append(head_per_stage)
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ s = input_shape
+ s = s[0] if isinstance(s, (list, tuple)) else s
+ return {'in_channel': s.channels}
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=self.out_channel, )]
+
+ def forward(self, rois_feat, stage=0):
+ out = self.head_list[stage](rois_feat)
+ return out
+
+
+@register
+class CascadeHead(BBoxHead):
+ __shared__ = ['num_classes', 'num_cascade_stages']
+ __inject__ = ['bbox_assigner', 'bbox_loss']
+ """
+ Cascade RCNN bbox head
+
+ Args:
+ head (nn.Layer): Extract feature in bbox head
+ in_channel (int): Input channel after RoI extractor
+ roi_extractor (object): The module of RoI Extractor
+ bbox_assigner (object): The module of Box Assigner, label and sample the
+ box.
+ num_classes (int): The number of classes
+ bbox_weight (List[List[float]]): The weight to get the decode box and the
+ length of weight is the number of cascade stage
+        num_cascade_stages (int): The number of stages to refine the box
+ """
+
+ def __init__(self,
+ head,
+ in_channel,
+ roi_extractor=RoIAlign().__dict__,
+ bbox_assigner='BboxAssigner',
+ num_classes=80,
+ bbox_weight=[[10., 10., 5., 5.], [20.0, 20.0, 10.0, 10.0],
+ [30.0, 30.0, 15.0, 15.0]],
+ num_cascade_stages=3,
+ bbox_loss=None):
+ nn.Layer.__init__(self, )
+ self.head = head
+ self.roi_extractor = roi_extractor
+ if isinstance(roi_extractor, dict):
+ self.roi_extractor = RoIAlign(**roi_extractor)
+ self.bbox_assigner = bbox_assigner
+
+ self.num_classes = num_classes
+ self.bbox_weight = bbox_weight
+ self.num_cascade_stages = num_cascade_stages
+ self.bbox_loss = bbox_loss
+
+ self.bbox_score_list = []
+ self.bbox_delta_list = []
+ for i in range(num_cascade_stages):
+ score_name = 'bbox_score_stage{}'.format(i)
+ delta_name = 'bbox_delta_stage{}'.format(i)
+ bbox_score = self.add_sublayer(
+ score_name,
+ nn.Linear(
+ in_channel,
+ self.num_classes + 1,
+ weight_attr=paddle.ParamAttr(initializer=Normal(
+ mean=0.0, std=0.01))))
+
+ bbox_delta = self.add_sublayer(
+ delta_name,
+ nn.Linear(
+ in_channel,
+ 4,
+ weight_attr=paddle.ParamAttr(initializer=Normal(
+ mean=0.0, std=0.001))))
+ self.bbox_score_list.append(bbox_score)
+ self.bbox_delta_list.append(bbox_delta)
+ self.assigned_label = None
+ self.assigned_rois = None
+
+ def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None):
+ """
+ body_feats (list[Tensor]): Feature maps from backbone
+ rois (Tensor): RoIs generated from RPN module
+ rois_num (Tensor): The number of RoIs in each image
+ inputs (dict{Tensor}): The ground-truth of image
+ """
+ targets = []
+ if self.training:
+ rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs)
+ targets_list = [targets]
+ self.assigned_rois = (rois, rois_num)
+ self.assigned_targets = targets
+
+ pred_bbox = None
+ head_out_list = []
+ for i in range(self.num_cascade_stages):
+ if i > 0:
+ rois, rois_num = self._get_rois_from_boxes(pred_bbox,
+ inputs['im_shape'])
+ if self.training:
+ rois, rois_num, targets = self.bbox_assigner(
+ rois, rois_num, inputs, i, is_cascade=True)
+ targets_list.append(targets)
+
+ rois_feat = self.roi_extractor(body_feats, rois, rois_num)
+ bbox_feat = self.head(rois_feat, i)
+ scores = self.bbox_score_list[i](bbox_feat)
+ deltas = self.bbox_delta_list[i](bbox_feat)
+ head_out_list.append([scores, deltas, rois])
+ pred_bbox = self._get_pred_bbox(deltas, rois, self.bbox_weight[i])
+
+ if self.training:
+ loss = {}
+ for stage, value in enumerate(zip(head_out_list, targets_list)):
+ (scores, deltas, rois), targets = value
+ loss_stage = self.get_loss(scores, deltas, targets, rois,
+ self.bbox_weight[stage])
+ for k, v in loss_stage.items():
+ loss[k + "_stage{}".format(
+ stage)] = v / self.num_cascade_stages
+
+ return loss, bbox_feat
+ else:
+ scores, deltas, self.refined_rois = self.get_prediction(
+ head_out_list)
+ return (deltas, scores), self.head
+
+ def _get_rois_from_boxes(self, boxes, im_shape):
+ rois = []
+ for i, boxes_per_image in enumerate(boxes):
+ clip_box = clip_bbox(boxes_per_image, im_shape[i])
+ if self.training:
+ keep = nonempty_bbox(clip_box)
+ if keep.shape[0] == 0:
+ keep = paddle.zeros([1], dtype='int32')
+ clip_box = paddle.gather(clip_box, keep)
+ rois.append(clip_box)
+ rois_num = paddle.concat([paddle.shape(r)[0] for r in rois])
+ return rois, rois_num
+
+ def _get_pred_bbox(self, deltas, proposals, weights):
+ pred_proposals = paddle.concat(proposals) if len(
+ proposals) > 1 else proposals[0]
+ pred_bbox = delta2bbox(deltas, pred_proposals, weights)
+ pred_bbox = paddle.reshape(pred_bbox, [-1, deltas.shape[-1]])
+ num_prop = []
+ for p in proposals:
+ num_prop.append(p.shape[0])
+ return pred_bbox.split(num_prop)
+
+ def get_prediction(self, head_out_list):
+ """
+ head_out_list(List[Tensor]): scores, deltas, rois
+ """
+ pred_list = []
+ scores_list = [F.softmax(head[0]) for head in head_out_list]
+ scores = paddle.add_n(scores_list) / self.num_cascade_stages
+ # Get deltas and rois from the last stage
+ _, deltas, rois = head_out_list[-1]
+ return scores, deltas, rois
+
+ def get_refined_rois(self, ):
+ return self.refined_rois
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/centernet_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/centernet_head.py
new file mode 100644
index 000000000..ce8b5c15d
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/centernet_head.py
@@ -0,0 +1,291 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import Constant, Uniform
+from ppdet.core.workspace import register
+from ppdet.modeling.losses import CTFocalLoss, GIoULoss
+
+
+class ConvLayer(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=False):
+ super(ConvLayer, self).__init__()
+ bias_attr = False
+ fan_in = ch_in * kernel_size**2
+ bound = 1 / math.sqrt(fan_in)
+ param_attr = paddle.ParamAttr(initializer=Uniform(-bound, bound))
+ if bias:
+ bias_attr = paddle.ParamAttr(initializer=Constant(0.))
+ self.conv = nn.Conv2D(
+ in_channels=ch_in,
+ out_channels=ch_out,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups,
+ weight_attr=param_attr,
+ bias_attr=bias_attr)
+
+ def forward(self, inputs):
+ out = self.conv(inputs)
+ return out
+
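+# Minimal usage sketch (added comment): a thin Conv2D wrapper with uniform
+# weight initialization bounded by 1/sqrt(fan_in).
+#
+#   conv = ConvLayer(64, 128, kernel_size=3, padding=1, bias=True)
+#   out = conv(paddle.randn([1, 64, 32, 32]))  # -> shape [1, 128, 32, 32]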
+
+@register
+class CenterNetHead(nn.Layer):
+ """
+ Args:
+        in_channels (int): the channel number of input to CenterNetHead.
+        num_classes (int): the number of classes, 80 (COCO dataset) by default.
+        head_planes (int): the channel number in all heads, 256 by default.
+        heatmap_weight (float): the weight of heatmap loss, 1 by default.
+        regress_ltrb (bool): whether to regress left/top/right/bottom or
+            width/height for a box, True by default.
+        size_weight (float): the weight of box size loss, 0.1 by default.
+        size_loss (str): the type of size regression loss, 'L1' by default.
+        offset_weight (float): the weight of center offset loss, 1 by default.
+        iou_weight (float): the weight of iou head loss, 0 by default.
+ """
+
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ in_channels,
+ num_classes=80,
+ head_planes=256,
+ heatmap_weight=1,
+ regress_ltrb=True,
+ size_weight=0.1,
+ size_loss='L1',
+ offset_weight=1,
+ iou_weight=0):
+ super(CenterNetHead, self).__init__()
+ self.regress_ltrb = regress_ltrb
+ self.weights = {
+ 'heatmap': heatmap_weight,
+ 'size': size_weight,
+ 'offset': offset_weight,
+ 'iou': iou_weight
+ }
+
+ # heatmap head
+ self.heatmap = nn.Sequential(
+ ConvLayer(
+ in_channels, head_planes, kernel_size=3, padding=1, bias=True),
+ nn.ReLU(),
+ ConvLayer(
+ head_planes,
+ num_classes,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=True))
+ with paddle.no_grad():
+ self.heatmap[2].conv.bias[:] = -2.19
+
+ # size(ltrb or wh) head
+ self.size = nn.Sequential(
+ ConvLayer(
+ in_channels, head_planes, kernel_size=3, padding=1, bias=True),
+ nn.ReLU(),
+ ConvLayer(
+ head_planes,
+ 4 if regress_ltrb else 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=True))
+ self.size_loss = size_loss
+
+ # offset head
+ self.offset = nn.Sequential(
+ ConvLayer(
+ in_channels, head_planes, kernel_size=3, padding=1, bias=True),
+ nn.ReLU(),
+ ConvLayer(
+ head_planes, 2, kernel_size=1, stride=1, padding=0, bias=True))
+
+        # iou head (optional)
+ if iou_weight > 0:
+ self.iou = nn.Sequential(
+ ConvLayer(
+ in_channels,
+ head_planes,
+ kernel_size=3,
+ padding=1,
+ bias=True),
+ nn.ReLU(),
+ ConvLayer(
+ head_planes,
+ 4 if regress_ltrb else 2,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=True))
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ if isinstance(input_shape, (list, tuple)):
+ input_shape = input_shape[0]
+ return {'in_channels': input_shape.channels}
+
+ def forward(self, feat, inputs):
+ heatmap = self.heatmap(feat)
+ size = self.size(feat)
+ offset = self.offset(feat)
+        iou = self.iou(feat) if hasattr(self, 'iou') else None
+
+ if self.training:
+ loss = self.get_loss(
+ inputs, self.weights, heatmap, size, offset, iou=iou)
+ return loss
+ else:
+ heatmap = F.sigmoid(heatmap)
+ head_outs = {'heatmap': heatmap, 'size': size, 'offset': offset}
+ if iou is not None:
+ head_outs.update({'iou': iou})
+ return head_outs
+
+ def get_loss(self, inputs, weights, heatmap, size, offset, iou=None):
+ # heatmap head loss: CTFocalLoss
+ heatmap_target = inputs['heatmap']
+ heatmap = paddle.clip(F.sigmoid(heatmap), 1e-4, 1 - 1e-4)
+ ctfocal_loss = CTFocalLoss()
+ heatmap_loss = ctfocal_loss(heatmap, heatmap_target)
+
+ # size head loss: L1 loss or GIoU loss
+ index = inputs['index']
+ mask = inputs['index_mask']
+ size = paddle.transpose(size, perm=[0, 2, 3, 1])
+ size_n, size_h, size_w, size_c = size.shape
+ size = paddle.reshape(size, shape=[size_n, -1, size_c])
+ index = paddle.unsqueeze(index, 2)
+ batch_inds = list()
+ for i in range(size_n):
+ batch_ind = paddle.full(
+ shape=[1, index.shape[1], 1], fill_value=i, dtype='int64')
+ batch_inds.append(batch_ind)
+ batch_inds = paddle.concat(batch_inds, axis=0)
+ index = paddle.concat(x=[batch_inds, index], axis=2)
+ pos_size = paddle.gather_nd(size, index=index)
+ mask = paddle.unsqueeze(mask, axis=2)
+ size_mask = paddle.expand_as(mask, pos_size)
+ size_mask = paddle.cast(size_mask, dtype=pos_size.dtype)
+ pos_num = size_mask.sum()
+ size_mask.stop_gradient = True
+ if self.size_loss == 'L1':
+ if self.regress_ltrb:
+ size_target = inputs['size']
+ # shape: [bs, max_per_img, 4]
+ else:
+ if inputs['size'].shape[-1] == 2:
+ # inputs['size'] is wh, and regress as wh
+ # shape: [bs, max_per_img, 2]
+ size_target = inputs['size']
+ else:
+ # inputs['size'] is ltrb, but regress as wh
+ # shape: [bs, max_per_img, 4]
+ size_target = inputs['size'][:, :, 0:2] + inputs['size'][:, :, 2:]
+
+ size_target.stop_gradient = True
+ size_loss = F.l1_loss(
+ pos_size * size_mask, size_target * size_mask, reduction='sum')
+ size_loss = size_loss / (pos_num + 1e-4)
+ elif self.size_loss == 'giou':
+ size_target = inputs['bbox_xys']
+ size_target.stop_gradient = True
+ centers_x = (size_target[:, :, 0:1] + size_target[:, :, 2:3]) / 2.0
+ centers_y = (size_target[:, :, 1:2] + size_target[:, :, 3:4]) / 2.0
+ x1 = centers_x - pos_size[:, :, 0:1]
+ y1 = centers_y - pos_size[:, :, 1:2]
+ x2 = centers_x + pos_size[:, :, 2:3]
+ y2 = centers_y + pos_size[:, :, 3:4]
+ pred_boxes = paddle.concat([x1, y1, x2, y2], axis=-1)
+ giou_loss = GIoULoss(reduction='sum')
+ size_loss = giou_loss(
+ pred_boxes * size_mask,
+ size_target * size_mask,
+ iou_weight=size_mask,
+ loc_reweight=None)
+ size_loss = size_loss / (pos_num + 1e-4)
+
+ # offset head loss: L1 loss
+ offset_target = inputs['offset']
+ offset = paddle.transpose(offset, perm=[0, 2, 3, 1])
+ offset_n, offset_h, offset_w, offset_c = offset.shape
+ offset = paddle.reshape(offset, shape=[offset_n, -1, offset_c])
+ pos_offset = paddle.gather_nd(offset, index=index)
+ offset_mask = paddle.expand_as(mask, pos_offset)
+ offset_mask = paddle.cast(offset_mask, dtype=pos_offset.dtype)
+ pos_num = offset_mask.sum()
+ offset_mask.stop_gradient = True
+ offset_target.stop_gradient = True
+ offset_loss = F.l1_loss(
+ pos_offset * offset_mask,
+ offset_target * offset_mask,
+ reduction='sum')
+ offset_loss = offset_loss / (pos_num + 1e-4)
+
+ # iou head loss: GIoU loss
+ if iou is not None:
+ iou = paddle.transpose(iou, perm=[0, 2, 3, 1])
+ iou_n, iou_h, iou_w, iou_c = iou.shape
+ iou = paddle.reshape(iou, shape=[iou_n, -1, iou_c])
+ pos_iou = paddle.gather_nd(iou, index=index)
+ iou_mask = paddle.expand_as(mask, pos_iou)
+ iou_mask = paddle.cast(iou_mask, dtype=pos_iou.dtype)
+ pos_num = iou_mask.sum()
+ iou_mask.stop_gradient = True
+ gt_bbox_xys = inputs['bbox_xys']
+ gt_bbox_xys.stop_gradient = True
+ centers_x = (gt_bbox_xys[:, :, 0:1] + gt_bbox_xys[:, :, 2:3]) / 2.0
+ centers_y = (gt_bbox_xys[:, :, 1:2] + gt_bbox_xys[:, :, 3:4]) / 2.0
+ x1 = centers_x - pos_size[:, :, 0:1]
+ y1 = centers_y - pos_size[:, :, 1:2]
+ x2 = centers_x + pos_size[:, :, 2:3]
+ y2 = centers_y + pos_size[:, :, 3:4]
+ pred_boxes = paddle.concat([x1, y1, x2, y2], axis=-1)
+ giou_loss = GIoULoss(reduction='sum')
+ iou_loss = giou_loss(
+ pred_boxes * iou_mask,
+ gt_bbox_xys * iou_mask,
+ iou_weight=iou_mask,
+ loc_reweight=None)
+ iou_loss = iou_loss / (pos_num + 1e-4)
+
+ losses = {
+ 'heatmap_loss': heatmap_loss,
+ 'size_loss': size_loss,
+ 'offset_loss': offset_loss,
+ }
+ det_loss = weights['heatmap'] * heatmap_loss + weights[
+ 'size'] * size_loss + weights['offset'] * offset_loss
+
+ if iou is not None:
+ losses.update({'iou_loss': iou_loss})
+ det_loss = det_loss + weights['iou'] * iou_loss
+ losses.update({'det_loss': det_loss})
+ return losses
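+
+
+# A minimal sketch (not from the original file) of the indexing trick used in
+# get_loss above: flat per-image indices are paired with batch indices so that
+# paddle.gather_nd can pick predictions at positive-sample locations. Shapes
+# below are illustrative assumptions:
+#
+#     size = paddle.rand([2, 128 * 128, 4])               # [bs, H*W, C]
+#     index = paddle.randint(0, 128 * 128, [2, 10, 1])    # [bs, max_objs, 1]
+#     batch = paddle.arange(2).reshape([2, 1, 1]).expand([2, 10, 1])
+#     nd_index = paddle.concat([batch, index], axis=2)    # [bs, max_objs, 2]
+#     pos = paddle.gather_nd(size, nd_index)              # [bs, max_objs, 4]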
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/detr_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/detr_head.py
new file mode 100644
index 000000000..6ca3499b9
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/detr_head.py
@@ -0,0 +1,364 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register
+import pycocotools.mask as mask_util
+from ..initializer import linear_init_, constant_
+from ..transformers.utils import inverse_sigmoid
+
+__all__ = ['DETRHead', 'DeformableDETRHead']
+
+
+class MLP(nn.Layer):
+ """This code is based on
+ https://github.com/facebookresearch/detr/blob/main/models/detr.py
+ """
+
+ def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
+ super().__init__()
+ self.num_layers = num_layers
+ h = [hidden_dim] * (num_layers - 1)
+ self.layers = nn.LayerList(
+ nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ for l in self.layers:
+ linear_init_(l)
+
+ def forward(self, x):
+ for i, layer in enumerate(self.layers):
+ x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
+ return x
+
+
+class MultiHeadAttentionMap(nn.Layer):
+ """This code is based on
+ https://github.com/facebookresearch/detr/blob/main/models/segmentation.py
+
+ This is a 2D attention module, which only returns the attention softmax (no multiplication by value)
+ """
+
+ def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0,
+ bias=True):
+ super().__init__()
+ self.num_heads = num_heads
+ self.hidden_dim = hidden_dim
+ self.dropout = nn.Dropout(dropout)
+
+ weight_attr = paddle.ParamAttr(
+ initializer=paddle.nn.initializer.XavierUniform())
+ bias_attr = paddle.framework.ParamAttr(
+ initializer=paddle.nn.initializer.Constant()) if bias else False
+
+ self.q_proj = nn.Linear(query_dim, hidden_dim, weight_attr, bias_attr)
+ self.k_proj = nn.Conv2D(
+ query_dim,
+ hidden_dim,
+ 1,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr)
+
+ self.normalize_fact = float(hidden_dim / self.num_heads)**-0.5
+
+ def forward(self, q, k, mask=None):
+ q = self.q_proj(q)
+ k = self.k_proj(k)
+ bs, num_queries, n, c, h, w = q.shape[0], q.shape[1], self.num_heads,\
+ self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]
+ qh = q.reshape([bs, num_queries, n, c])
+ kh = k.reshape([bs, n, c, h, w])
+ # weights = paddle.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)
+ qh = qh.transpose([0, 2, 1, 3]).reshape([-1, num_queries, c])
+ kh = kh.reshape([-1, c, h * w])
+ weights = paddle.bmm(qh * self.normalize_fact, kh).reshape(
+ [bs, n, num_queries, h, w]).transpose([0, 2, 1, 3, 4])
+
+ if mask is not None:
+ weights += mask
+        # fix a potential bug: https://github.com/facebookresearch/detr/issues/247
+ weights = F.softmax(weights.flatten(3), axis=-1).reshape(weights.shape)
+ weights = self.dropout(weights)
+ return weights
+
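+# An added shape sketch (values assumed): the bmm path in forward above
+# reproduces the commented-out einsum("bqnc,bnchw->bqnhw"):
+#     qh: [bs, nq, n, c]   -> transpose/reshape -> [bs*n, nq, c]
+#     kh: [bs, n, c, h, w] -> reshape           -> [bs*n, c, h*w]
+#     bmm -> [bs*n, nq, h*w] -> reshape + transpose -> [bs, nq, n, h, w]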
+
+class MaskHeadFPNConv(nn.Layer):
+ """This code is based on
+ https://github.com/facebookresearch/detr/blob/main/models/segmentation.py
+
+ Simple convolutional head, using group norm.
+    Upsampling is done using an FPN approach.
+ """
+
+ def __init__(self, input_dim, fpn_dims, context_dim, num_groups=8):
+ super().__init__()
+
+ inter_dims = [input_dim,
+ ] + [context_dim // (2**i) for i in range(1, 5)]
+ weight_attr = paddle.ParamAttr(
+ initializer=paddle.nn.initializer.KaimingUniform())
+ bias_attr = paddle.framework.ParamAttr(
+ initializer=paddle.nn.initializer.Constant())
+
+ self.conv0 = self._make_layers(input_dim, input_dim, 3, num_groups,
+ weight_attr, bias_attr)
+ self.conv_inter = nn.LayerList()
+ for in_dims, out_dims in zip(inter_dims[:-1], inter_dims[1:]):
+ self.conv_inter.append(
+ self._make_layers(in_dims, out_dims, 3, num_groups, weight_attr,
+ bias_attr))
+
+ self.conv_out = nn.Conv2D(
+ inter_dims[-1],
+ 1,
+ 3,
+ padding=1,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr)
+
+ self.adapter = nn.LayerList()
+ for i in range(len(fpn_dims)):
+ self.adapter.append(
+ nn.Conv2D(
+ fpn_dims[i],
+ inter_dims[i + 1],
+ 1,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr))
+
+ def _make_layers(self,
+ in_dims,
+ out_dims,
+ kernel_size,
+ num_groups,
+ weight_attr=None,
+ bias_attr=None):
+ return nn.Sequential(
+ nn.Conv2D(
+ in_dims,
+ out_dims,
+ kernel_size,
+ padding=kernel_size // 2,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr),
+ nn.GroupNorm(num_groups, out_dims),
+ nn.ReLU())
+
+ def forward(self, x, bbox_attention_map, fpns):
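+        # Tile the projected feature once per query so each query's attention
+        # map can be concatenated channel-wise before the FPN-style refinement.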
+ x = paddle.concat([
+ x.tile([bbox_attention_map.shape[1], 1, 1, 1]),
+ bbox_attention_map.flatten(0, 1)
+ ], 1)
+ x = self.conv0(x)
+ for inter_layer, adapter_layer, feat in zip(self.conv_inter[:-1],
+ self.adapter, fpns):
+ feat = adapter_layer(feat).tile(
+ [bbox_attention_map.shape[1], 1, 1, 1])
+ x = inter_layer(x)
+ x = feat + F.interpolate(x, size=feat.shape[-2:])
+
+ x = self.conv_inter[-1](x)
+ x = self.conv_out(x)
+ return x
+
+
+@register
+class DETRHead(nn.Layer):
+ __shared__ = ['num_classes', 'hidden_dim', 'use_focal_loss']
+ __inject__ = ['loss']
+
+ def __init__(self,
+ num_classes=80,
+ hidden_dim=256,
+ nhead=8,
+ num_mlp_layers=3,
+ loss='DETRLoss',
+ fpn_dims=[1024, 512, 256],
+ with_mask_head=False,
+ use_focal_loss=False):
+ super(DETRHead, self).__init__()
+ # add background class
+ self.num_classes = num_classes if use_focal_loss else num_classes + 1
+ self.hidden_dim = hidden_dim
+ self.loss = loss
+ self.with_mask_head = with_mask_head
+ self.use_focal_loss = use_focal_loss
+
+ self.score_head = nn.Linear(hidden_dim, self.num_classes)
+ self.bbox_head = MLP(hidden_dim,
+ hidden_dim,
+ output_dim=4,
+ num_layers=num_mlp_layers)
+ if self.with_mask_head:
+ self.bbox_attention = MultiHeadAttentionMap(hidden_dim, hidden_dim,
+ nhead)
+ self.mask_head = MaskHeadFPNConv(hidden_dim + nhead, fpn_dims,
+ hidden_dim)
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ linear_init_(self.score_head)
+
+ @classmethod
+ def from_config(cls, cfg, hidden_dim, nhead, input_shape):
+
+ return {
+ 'hidden_dim': hidden_dim,
+ 'nhead': nhead,
+ 'fpn_dims': [i.channels for i in input_shape[::-1]][1:]
+ }
+
+ @staticmethod
+ def get_gt_mask_from_polygons(gt_poly, pad_mask):
+ out_gt_mask = []
+ for polygons, padding in zip(gt_poly, pad_mask):
+ height, width = int(padding[:, 0].sum()), int(padding[0, :].sum())
+ masks = []
+ for obj_poly in polygons:
+ rles = mask_util.frPyObjects(obj_poly, height, width)
+ rle = mask_util.merge(rles)
+ masks.append(
+ paddle.to_tensor(mask_util.decode(rle)).astype('float32'))
+ masks = paddle.stack(masks)
+ masks_pad = paddle.zeros(
+ [masks.shape[0], pad_mask.shape[1], pad_mask.shape[2]])
+ masks_pad[:, :height, :width] = masks
+ out_gt_mask.append(masks_pad)
+ return out_gt_mask
+
+ def forward(self, out_transformer, body_feats, inputs=None):
+ r"""
+ Args:
+ out_transformer (Tuple): (feats: [num_levels, batch_size,
+ num_queries, hidden_dim],
+ memory: [batch_size, hidden_dim, h, w],
+ src_proj: [batch_size, h*w, hidden_dim],
+ src_mask: [batch_size, 1, 1, h, w])
+ body_feats (List(Tensor)): list[[B, C, H, W]]
+ inputs (dict): dict(inputs)
+ """
+ feats, memory, src_proj, src_mask = out_transformer
+ outputs_logit = self.score_head(feats)
+ outputs_bbox = F.sigmoid(self.bbox_head(feats))
+ outputs_seg = None
+ if self.with_mask_head:
+ bbox_attention_map = self.bbox_attention(feats[-1], memory,
+ src_mask)
+ fpn_feats = [a for a in body_feats[::-1]][1:]
+ outputs_seg = self.mask_head(src_proj, bbox_attention_map,
+ fpn_feats)
+ outputs_seg = outputs_seg.reshape([
+ feats.shape[1], feats.shape[2], outputs_seg.shape[-2],
+ outputs_seg.shape[-1]
+ ])
+
+ if self.training:
+ assert inputs is not None
+ assert 'gt_bbox' in inputs and 'gt_class' in inputs
+ gt_mask = self.get_gt_mask_from_polygons(
+ inputs['gt_poly'],
+ inputs['pad_mask']) if 'gt_poly' in inputs else None
+ return self.loss(
+ outputs_bbox,
+ outputs_logit,
+ inputs['gt_bbox'],
+ inputs['gt_class'],
+ masks=outputs_seg,
+ gt_mask=gt_mask)
+ else:
+ return (outputs_bbox[-1], outputs_logit[-1], outputs_seg)
+
+
+@register
+class DeformableDETRHead(nn.Layer):
+ __shared__ = ['num_classes', 'hidden_dim']
+ __inject__ = ['loss']
+
+ def __init__(self,
+ num_classes=80,
+ hidden_dim=512,
+ nhead=8,
+ num_mlp_layers=3,
+ loss='DETRLoss'):
+ super(DeformableDETRHead, self).__init__()
+ self.num_classes = num_classes
+ self.hidden_dim = hidden_dim
+ self.nhead = nhead
+ self.loss = loss
+
+ self.score_head = nn.Linear(hidden_dim, self.num_classes)
+ self.bbox_head = MLP(hidden_dim,
+ hidden_dim,
+ output_dim=4,
+ num_layers=num_mlp_layers)
+
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ linear_init_(self.score_head)
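+        # -4.595 ~= -log((1 - 0.01) / 0.01): biases class scores toward a 0.01
+        # prior probability, as in focal-loss-style initialization.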
+ constant_(self.score_head.bias, -4.595)
+ constant_(self.bbox_head.layers[-1].weight)
+
+ with paddle.no_grad():
+ bias = paddle.zeros_like(self.bbox_head.layers[-1].bias)
+ bias[2:] = -2.0
+ self.bbox_head.layers[-1].bias.set_value(bias)
+
+ @classmethod
+ def from_config(cls, cfg, hidden_dim, nhead, input_shape):
+ return {'hidden_dim': hidden_dim, 'nhead': nhead}
+
+ def forward(self, out_transformer, body_feats, inputs=None):
+ r"""
+ Args:
+ out_transformer (Tuple): (feats: [num_levels, batch_size,
+ num_queries, hidden_dim],
+ memory: [batch_size,
+ \sum_{l=0}^{L-1} H_l \cdot W_l, hidden_dim],
+ reference_points: [batch_size, num_queries, 2])
+ body_feats (List(Tensor)): list[[B, C, H, W]]
+ inputs (dict): dict(inputs)
+ """
+ feats, memory, reference_points = out_transformer
+ reference_points = inverse_sigmoid(reference_points.unsqueeze(0))
+ outputs_bbox = self.bbox_head(feats)
+
+        # Equivalent to "outputs_bbox[:, :, :, :2] += reference_points", but
+        # in-place slice assignment produces wrong gradients in Paddle, so
+        # concat is used instead.
+ outputs_bbox = paddle.concat(
+ [
+ outputs_bbox[:, :, :, :2] + reference_points,
+ outputs_bbox[:, :, :, 2:]
+ ],
+ axis=-1)
+
+ outputs_bbox = F.sigmoid(outputs_bbox)
+ outputs_logit = self.score_head(feats)
+
+ if self.training:
+ assert inputs is not None
+ assert 'gt_bbox' in inputs and 'gt_class' in inputs
+
+ return self.loss(outputs_bbox, outputs_logit, inputs['gt_bbox'],
+ inputs['gt_class'])
+ else:
+ return (outputs_bbox[-1], outputs_logit[-1], None)
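+
+
+# A hedged numeric sketch (not from the original file) of the reference-point
+# update above: offsets are added in logit space and squashed back, so the head
+# predicts residuals around each reference point. With assumed toy values:
+#
+#     ref = paddle.to_tensor([[[0.25, 0.75]]])   # normalized reference point
+#     logit = inverse_sigmoid(ref)               # log(p / (1 - p))
+#     F.sigmoid(logit + 0.)                      # a zero offset recovers ref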
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/face_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/face_head.py
new file mode 100644
index 000000000..bb51f2eb9
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/face_head.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+
+from ppdet.core.workspace import register
+from ..layers import AnchorGeneratorSSD
+
+
+@register
+class FaceHead(nn.Layer):
+ """
+ Head block for Face detection network
+
+ Args:
+ num_classes (int): Number of output classes.
+ in_channels (int): Number of input channels.
+        anchor_generator (object): instance of anchor generator method.
+        kernel_size (int): kernel size of Conv2D in FaceHead.
+        padding (int): padding of Conv2D in FaceHead.
+        conv_decay (float): weight decay for conv layer weights.
+ loss (object): loss of face detection model.
+ """
+ __shared__ = ['num_classes']
+ __inject__ = ['anchor_generator', 'loss']
+
+ def __init__(self,
+ num_classes=80,
+ in_channels=[96, 96],
+ anchor_generator=AnchorGeneratorSSD().__dict__,
+ kernel_size=3,
+ padding=1,
+ conv_decay=0.,
+ loss='SSDLoss'):
+ super(FaceHead, self).__init__()
+ # add background class
+ self.num_classes = num_classes + 1
+ self.in_channels = in_channels
+ self.anchor_generator = anchor_generator
+ self.loss = loss
+
+ if isinstance(anchor_generator, dict):
+ self.anchor_generator = AnchorGeneratorSSD(**anchor_generator)
+
+ self.num_priors = self.anchor_generator.num_priors
+ self.box_convs = []
+ self.score_convs = []
+ for i, num_prior in enumerate(self.num_priors):
+ box_conv_name = "boxes{}".format(i)
+ box_conv = self.add_sublayer(
+ box_conv_name,
+ nn.Conv2D(
+ in_channels=self.in_channels[i],
+ out_channels=num_prior * 4,
+ kernel_size=kernel_size,
+ padding=padding))
+ self.box_convs.append(box_conv)
+
+ score_conv_name = "scores{}".format(i)
+ score_conv = self.add_sublayer(
+ score_conv_name,
+ nn.Conv2D(
+ in_channels=self.in_channels[i],
+ out_channels=num_prior * self.num_classes,
+ kernel_size=kernel_size,
+ padding=padding))
+ self.score_convs.append(score_conv)
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape], }
+
+ def forward(self, feats, image, gt_bbox=None, gt_class=None):
+ box_preds = []
+ cls_scores = []
+ prior_boxes = []
+ for feat, box_conv, score_conv in zip(feats, self.box_convs,
+ self.score_convs):
+ box_pred = box_conv(feat)
+ box_pred = paddle.transpose(box_pred, [0, 2, 3, 1])
+ box_pred = paddle.reshape(box_pred, [0, -1, 4])
+ box_preds.append(box_pred)
+
+ cls_score = score_conv(feat)
+ cls_score = paddle.transpose(cls_score, [0, 2, 3, 1])
+ cls_score = paddle.reshape(cls_score, [0, -1, self.num_classes])
+ cls_scores.append(cls_score)
+
+ prior_boxes = self.anchor_generator(feats, image)
+
+ if self.training:
+ return self.get_loss(box_preds, cls_scores, gt_bbox, gt_class,
+ prior_boxes)
+ else:
+ return (box_preds, cls_scores), prior_boxes
+
+ def get_loss(self, boxes, scores, gt_bbox, gt_class, prior_boxes):
+ return self.loss(boxes, scores, gt_bbox, gt_class, prior_boxes)
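+
+
+# An added note (not from the original file): paddle.reshape treats 0 as "keep
+# the corresponding input dimension", so [0, -1, 4] in forward keeps the batch
+# size and flattens H * W * num_prior into one axis. With assumed shapes:
+#
+#     x = paddle.rand([8, 12, 10, 10])       # [N, num_prior * 4, H, W]
+#     x = paddle.transpose(x, [0, 2, 3, 1])  # [N, H, W, num_prior * 4]
+#     x = paddle.reshape(x, [0, -1, 4])      # [8, 300, 4]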
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/fcos_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/fcos_head.py
new file mode 100644
index 000000000..1d61feed6
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/fcos_head.py
@@ -0,0 +1,258 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import Normal, Constant
+
+from ppdet.core.workspace import register
+from ppdet.modeling.layers import ConvNormLayer
+
+
+class ScaleReg(nn.Layer):
+ """
+ Parameter for scaling the regression outputs.
+ """
+
+ def __init__(self):
+ super(ScaleReg, self).__init__()
+ self.scale_reg = self.create_parameter(
+ shape=[1],
+ attr=ParamAttr(initializer=Constant(value=1.)),
+ dtype="float32")
+
+ def forward(self, inputs):
+ out = inputs * self.scale_reg
+ return out
+
+
+@register
+class FCOSFeat(nn.Layer):
+ """
+ FCOSFeat of FCOS
+
+ Args:
+ feat_in (int): The channel number of input Tensor.
+ feat_out (int): The channel number of output Tensor.
+ num_convs (int): The convolution number of the FCOSFeat.
+ norm_type (str): Normalization type, 'bn'/'sync_bn'/'gn'.
+ use_dcn (bool): Whether to use dcn in tower or not.
+ """
+
+ def __init__(self,
+ feat_in=256,
+ feat_out=256,
+ num_convs=4,
+ norm_type='bn',
+ use_dcn=False):
+ super(FCOSFeat, self).__init__()
+ self.num_convs = num_convs
+ self.norm_type = norm_type
+ self.cls_subnet_convs = []
+ self.reg_subnet_convs = []
+ for i in range(self.num_convs):
+ in_c = feat_in if i == 0 else feat_out
+
+ cls_conv_name = 'fcos_head_cls_tower_conv_{}'.format(i)
+ cls_conv = self.add_sublayer(
+ cls_conv_name,
+ ConvNormLayer(
+ ch_in=in_c,
+ ch_out=feat_out,
+ filter_size=3,
+ stride=1,
+ norm_type=norm_type,
+ use_dcn=use_dcn,
+ bias_on=True,
+ lr_scale=2.))
+ self.cls_subnet_convs.append(cls_conv)
+
+ reg_conv_name = 'fcos_head_reg_tower_conv_{}'.format(i)
+ reg_conv = self.add_sublayer(
+ reg_conv_name,
+ ConvNormLayer(
+ ch_in=in_c,
+ ch_out=feat_out,
+ filter_size=3,
+ stride=1,
+ norm_type=norm_type,
+ use_dcn=use_dcn,
+ bias_on=True,
+ lr_scale=2.))
+ self.reg_subnet_convs.append(reg_conv)
+
+ def forward(self, fpn_feat):
+ cls_feat = fpn_feat
+ reg_feat = fpn_feat
+ for i in range(self.num_convs):
+ cls_feat = F.relu(self.cls_subnet_convs[i](cls_feat))
+ reg_feat = F.relu(self.reg_subnet_convs[i](reg_feat))
+ return cls_feat, reg_feat
+
+
+@register
+class FCOSHead(nn.Layer):
+ """
+ FCOSHead
+ Args:
+ fcos_feat (object): Instance of 'FCOSFeat'
+ num_classes (int): Number of classes
+ fpn_stride (list): The stride of each FPN Layer
+ prior_prob (float): Used to set the bias init for the class prediction layer
+ fcos_loss (object): Instance of 'FCOSLoss'
+        norm_reg_targets (bool): Normalize the regression targets if True
+        centerness_on_reg (bool): Whether to predict centerness on the regression or the classification branch
+ """
+ __inject__ = ['fcos_feat', 'fcos_loss']
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ fcos_feat,
+ num_classes=80,
+ fpn_stride=[8, 16, 32, 64, 128],
+ prior_prob=0.01,
+ fcos_loss='FCOSLoss',
+ norm_reg_targets=True,
+ centerness_on_reg=True):
+ super(FCOSHead, self).__init__()
+ self.fcos_feat = fcos_feat
+ self.num_classes = num_classes
+ self.fpn_stride = fpn_stride
+ self.prior_prob = prior_prob
+ self.fcos_loss = fcos_loss
+ self.norm_reg_targets = norm_reg_targets
+ self.centerness_on_reg = centerness_on_reg
+
+ conv_cls_name = "fcos_head_cls"
+ bias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)
+ self.fcos_head_cls = self.add_sublayer(
+ conv_cls_name,
+ nn.Conv2D(
+ in_channels=256,
+ out_channels=self.num_classes,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=ParamAttr(
+ initializer=Constant(value=bias_init_value))))
+
+ conv_reg_name = "fcos_head_reg"
+ self.fcos_head_reg = self.add_sublayer(
+ conv_reg_name,
+ nn.Conv2D(
+ in_channels=256,
+ out_channels=4,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=ParamAttr(initializer=Constant(value=0))))
+
+ conv_centerness_name = "fcos_head_centerness"
+ self.fcos_head_centerness = self.add_sublayer(
+ conv_centerness_name,
+ nn.Conv2D(
+ in_channels=256,
+ out_channels=1,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=ParamAttr(initializer=Constant(value=0))))
+
+ self.scales_regs = []
+ for i in range(len(self.fpn_stride)):
+ lvl = int(math.log(int(self.fpn_stride[i]), 2))
+ feat_name = 'p{}_feat'.format(lvl)
+ scale_reg = self.add_sublayer(feat_name, ScaleReg())
+ self.scales_regs.append(scale_reg)
+
+ def _compute_locations_by_level(self, fpn_stride, feature):
+ """
+ Compute locations of anchor points of each FPN layer
+ Args:
+ fpn_stride (int): The stride of current FPN feature map
+ feature (Tensor): Tensor of current FPN feature map
+ Return:
+ Anchor points locations of current FPN feature map
+ """
+ shape_fm = paddle.shape(feature)
+ shape_fm.stop_gradient = True
+ h, w = shape_fm[2], shape_fm[3]
+ shift_x = paddle.arange(0, w * fpn_stride, fpn_stride)
+ shift_y = paddle.arange(0, h * fpn_stride, fpn_stride)
+ shift_x = paddle.unsqueeze(shift_x, axis=0)
+ shift_y = paddle.unsqueeze(shift_y, axis=1)
+ shift_x = paddle.expand(shift_x, shape=[h, w])
+ shift_y = paddle.expand(shift_y, shape=[h, w])
+ shift_x.stop_gradient = True
+ shift_y.stop_gradient = True
+ shift_x = paddle.reshape(shift_x, shape=[-1])
+ shift_y = paddle.reshape(shift_y, shape=[-1])
+ location = paddle.stack(
+ [shift_x, shift_y], axis=-1) + float(fpn_stride) / 2
+ location.stop_gradient = True
+ return location
+
+ def forward(self, fpn_feats, is_training):
+ assert len(fpn_feats) == len(
+ self.fpn_stride
+ ), "The size of fpn_feats is not equal to size of fpn_stride"
+ cls_logits_list = []
+ bboxes_reg_list = []
+ centerness_list = []
+ for scale_reg, fpn_stride, fpn_feat in zip(self.scales_regs,
+ self.fpn_stride, fpn_feats):
+ fcos_cls_feat, fcos_reg_feat = self.fcos_feat(fpn_feat)
+ cls_logits = self.fcos_head_cls(fcos_cls_feat)
+ bbox_reg = scale_reg(self.fcos_head_reg(fcos_reg_feat))
+ if self.centerness_on_reg:
+ centerness = self.fcos_head_centerness(fcos_reg_feat)
+ else:
+ centerness = self.fcos_head_centerness(fcos_cls_feat)
+ if self.norm_reg_targets:
+ bbox_reg = F.relu(bbox_reg)
+ if not is_training:
+ bbox_reg = bbox_reg * fpn_stride
+ else:
+ bbox_reg = paddle.exp(bbox_reg)
+ cls_logits_list.append(cls_logits)
+ bboxes_reg_list.append(bbox_reg)
+ centerness_list.append(centerness)
+
+ if not is_training:
+ locations_list = []
+ for fpn_stride, feature in zip(self.fpn_stride, fpn_feats):
+ location = self._compute_locations_by_level(fpn_stride, feature)
+ locations_list.append(location)
+
+ return locations_list, cls_logits_list, bboxes_reg_list, centerness_list
+ else:
+ return cls_logits_list, bboxes_reg_list, centerness_list
+
+ def get_loss(self, fcos_head_outs, tag_labels, tag_bboxes, tag_centerness):
+ cls_logits, bboxes_reg, centerness = fcos_head_outs
+ return self.fcos_loss(cls_logits, bboxes_reg, centerness, tag_labels,
+ tag_bboxes, tag_centerness)
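+
+
+# A hedged numeric sketch (not from the original file) of
+# _compute_locations_by_level: one anchor point is placed at the center of each
+# stride-sized cell. For an assumed 2x2 feature map with stride 8 the returned
+# locations are:
+#
+#     [[ 4.,  4.], [12.,  4.], [ 4., 12.], [12., 12.]]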
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/gfl_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/gfl_head.py
new file mode 100644
index 000000000..17e87a4ef
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/gfl_head.py
@@ -0,0 +1,480 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The code is based on:
+# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/gfl_head.py
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import numpy as np
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import Normal, Constant
+
+from ppdet.core.workspace import register
+from ppdet.modeling.layers import ConvNormLayer
+from ppdet.modeling.bbox_utils import distance2bbox, bbox2distance
+from ppdet.data.transform.atss_assigner import bbox_overlaps
+
+
+class ScaleReg(nn.Layer):
+ """
+ Parameter for scaling the regression outputs.
+ """
+
+ def __init__(self):
+ super(ScaleReg, self).__init__()
+ self.scale_reg = self.create_parameter(
+ shape=[1],
+ attr=ParamAttr(initializer=Constant(value=1.)),
+ dtype="float32")
+
+ def forward(self, inputs):
+ out = inputs * self.scale_reg
+ return out
+
+
+class Integral(nn.Layer):
+ """A fixed layer for calculating integral result from distribution.
+    This layer calculates the target location by :math:`sum{P(y_i) * y_i}`,
+    where P(y_i) denotes the softmax probability of the discrete value y_i,
+    and y_i ranges over the discrete set, usually {0, 1, 2, ..., reg_max}.
+
+ Args:
+ reg_max (int): The maximal value of the discrete set. Default: 16. You
+ may want to reset it according to your new dataset or related
+ settings.
+ """
+
+ def __init__(self, reg_max=16):
+ super(Integral, self).__init__()
+ self.reg_max = reg_max
+ self.register_buffer('project',
+ paddle.linspace(0, self.reg_max, self.reg_max + 1))
+
+ def forward(self, x):
+ """Forward feature from the regression head to get integral result of
+ bounding box location.
+ Args:
+ x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
+ n is self.reg_max.
+ Returns:
+ x (Tensor): Integral result of box locations, i.e., distance
+ offsets from the box center in four directions, shape (N, 4).
+ """
+ x = F.softmax(x.reshape([-1, self.reg_max + 1]), axis=1)
+ x = F.linear(x, self.project).reshape([-1, 4])
+ return x
+
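+# A worked example (an added note, values assumed): Integral decodes a discrete
+# distribution over {0, ..., reg_max} into its expectation. Putting all mass on
+# bin 5 for the first side decodes to 5; the remaining uniform sides decode to
+# the mean bin, 8:
+#
+#     m = Integral(reg_max=16)
+#     logits = paddle.zeros([1, 4 * 17])
+#     logits[0, 5] = 1e9          # first side: all probability mass on bin 5
+#     m(logits)                   # -> [[5., 8., 8., 8.]]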
+
+@register
+class DGQP(nn.Layer):
+ """Distribution-Guided Quality Predictor of GFocal head
+
+ Args:
+ reg_topk (int): top-k statistics of distribution to guide LQE
+ reg_channels (int): hidden layer unit to generate LQE
+ add_mean (bool): Whether to calculate the mean of top-k statistics
+ """
+
+ def __init__(self, reg_topk=4, reg_channels=64, add_mean=True):
+ super(DGQP, self).__init__()
+ self.reg_topk = reg_topk
+ self.reg_channels = reg_channels
+ self.add_mean = add_mean
+ self.total_dim = reg_topk
+ if add_mean:
+ self.total_dim += 1
+ self.reg_conv1 = self.add_sublayer(
+ 'dgqp_reg_conv1',
+ nn.Conv2D(
+ in_channels=4 * self.total_dim,
+ out_channels=self.reg_channels,
+ kernel_size=1,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=ParamAttr(initializer=Constant(value=0))))
+ self.reg_conv2 = self.add_sublayer(
+ 'dgqp_reg_conv2',
+ nn.Conv2D(
+ in_channels=self.reg_channels,
+ out_channels=1,
+ kernel_size=1,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=ParamAttr(initializer=Constant(value=0))))
+
+ def forward(self, x):
+ """Forward feature from the regression head to get integral result of
+ bounding box location.
+ Args:
+ x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
+ n is self.reg_max.
+ Returns:
+ x (Tensor): Integral result of box locations, i.e., distance
+ offsets from the box center in four directions, shape (N, 4).
+ """
+ N, _, H, W = x.shape[:]
+ prob = F.softmax(x.reshape([N, 4, -1, H, W]), axis=2)
+ prob_topk, _ = prob.topk(self.reg_topk, axis=2)
+ if self.add_mean:
+ stat = paddle.concat(
+ [prob_topk, prob_topk.mean(
+ axis=2, keepdim=True)], axis=2)
+ else:
+ stat = prob_topk
+ y = F.relu(self.reg_conv1(stat.reshape([N, -1, H, W])))
+ y = F.sigmoid(self.reg_conv2(y))
+ return y
+
+
+@register
+class GFLHead(nn.Layer):
+ """
+ GFLHead
+ Args:
+ conv_feat (object): Instance of 'FCOSFeat'
+ num_classes (int): Number of classes
+ fpn_stride (list): The stride of each FPN Layer
+ prior_prob (float): Used to set the bias init for the class prediction layer
+ loss_class (object): Instance of QualityFocalLoss.
+ loss_dfl (object): Instance of DistributionFocalLoss.
+ loss_bbox (object): Instance of bbox loss.
+        reg_max (int): Max value of integral set :math:`{0, ..., reg_max}`
+            in QFL setting. Default: 16.
+ """
+ __inject__ = [
+ 'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox', 'nms'
+ ]
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ conv_feat='FCOSFeat',
+ dgqp_module=None,
+ num_classes=80,
+ fpn_stride=[8, 16, 32, 64, 128],
+ prior_prob=0.01,
+ loss_class='QualityFocalLoss',
+ loss_dfl='DistributionFocalLoss',
+ loss_bbox='GIoULoss',
+ reg_max=16,
+ feat_in_chan=256,
+ nms=None,
+ nms_pre=1000,
+ cell_offset=0):
+ super(GFLHead, self).__init__()
+ self.conv_feat = conv_feat
+ self.dgqp_module = dgqp_module
+ self.num_classes = num_classes
+ self.fpn_stride = fpn_stride
+ self.prior_prob = prior_prob
+ self.loss_qfl = loss_class
+ self.loss_dfl = loss_dfl
+ self.loss_bbox = loss_bbox
+ self.reg_max = reg_max
+ self.feat_in_chan = feat_in_chan
+ self.nms = nms
+ self.nms_pre = nms_pre
+ self.cell_offset = cell_offset
+ self.use_sigmoid = self.loss_qfl.use_sigmoid
+ if self.use_sigmoid:
+ self.cls_out_channels = self.num_classes
+ else:
+ self.cls_out_channels = self.num_classes + 1
+
+ conv_cls_name = "gfl_head_cls"
+ bias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)
+ self.gfl_head_cls = self.add_sublayer(
+ conv_cls_name,
+ nn.Conv2D(
+ in_channels=self.feat_in_chan,
+ out_channels=self.cls_out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=ParamAttr(
+ initializer=Constant(value=bias_init_value))))
+
+ conv_reg_name = "gfl_head_reg"
+ self.gfl_head_reg = self.add_sublayer(
+ conv_reg_name,
+ nn.Conv2D(
+ in_channels=self.feat_in_chan,
+ out_channels=4 * (self.reg_max + 1),
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=ParamAttr(initializer=Constant(value=0))))
+
+ self.scales_regs = []
+ for i in range(len(self.fpn_stride)):
+ lvl = int(math.log(int(self.fpn_stride[i]), 2))
+ feat_name = 'p{}_feat'.format(lvl)
+ scale_reg = self.add_sublayer(feat_name, ScaleReg())
+ self.scales_regs.append(scale_reg)
+
+ self.distribution_project = Integral(self.reg_max)
+
+ def forward(self, fpn_feats):
+ assert len(fpn_feats) == len(
+ self.fpn_stride
+ ), "The size of fpn_feats is not equal to size of fpn_stride"
+ cls_logits_list = []
+ bboxes_reg_list = []
+ for scale_reg, fpn_feat in zip(self.scales_regs, fpn_feats):
+ conv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat)
+ cls_logits = self.gfl_head_cls(conv_cls_feat)
+ bbox_reg = scale_reg(self.gfl_head_reg(conv_reg_feat))
+ if self.dgqp_module:
+ quality_score = self.dgqp_module(bbox_reg)
+ cls_logits = F.sigmoid(cls_logits) * quality_score
+ if not self.training:
+ cls_logits = F.sigmoid(cls_logits.transpose([0, 2, 3, 1]))
+ bbox_reg = bbox_reg.transpose([0, 2, 3, 1])
+ cls_logits_list.append(cls_logits)
+ bboxes_reg_list.append(bbox_reg)
+
+ return (cls_logits_list, bboxes_reg_list)
+
+ def _images_to_levels(self, target, num_level_anchors):
+ """
+ Convert targets by image to targets by feature level.
+ """
+ level_targets = []
+ start = 0
+ for n in num_level_anchors:
+ end = start + n
+ level_targets.append(target[:, start:end].squeeze(0))
+ start = end
+ return level_targets
+
+ def _grid_cells_to_center(self, grid_cells):
+ """
+        Get center location of each grid cell
+ Args:
+ grid_cells: grid cells of a feature map
+ Returns:
+ center points
+ """
+ cells_cx = (grid_cells[:, 2] + grid_cells[:, 0]) / 2
+ cells_cy = (grid_cells[:, 3] + grid_cells[:, 1]) / 2
+ return paddle.stack([cells_cx, cells_cy], axis=-1)
+
+ def get_loss(self, gfl_head_outs, gt_meta):
+ cls_logits, bboxes_reg = gfl_head_outs
+ num_level_anchors = [
+ featmap.shape[-2] * featmap.shape[-1] for featmap in cls_logits
+ ]
+ grid_cells_list = self._images_to_levels(gt_meta['grid_cells'],
+ num_level_anchors)
+ labels_list = self._images_to_levels(gt_meta['labels'],
+ num_level_anchors)
+ label_weights_list = self._images_to_levels(gt_meta['label_weights'],
+ num_level_anchors)
+ bbox_targets_list = self._images_to_levels(gt_meta['bbox_targets'],
+ num_level_anchors)
+ num_total_pos = sum(gt_meta['pos_num'])
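+        # Average the positive count across ranks when running distributed;
+        # otherwise fall back to the local count clamped to at least 1.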
+ try:
+ num_total_pos = paddle.distributed.all_reduce(num_total_pos.clone(
+ )) / paddle.distributed.get_world_size()
+ except:
+ num_total_pos = max(num_total_pos, 1)
+
+ loss_bbox_list, loss_dfl_list, loss_qfl_list, avg_factor = [], [], [], []
+ for cls_score, bbox_pred, grid_cells, labels, label_weights, bbox_targets, stride in zip(
+ cls_logits, bboxes_reg, grid_cells_list, labels_list,
+ label_weights_list, bbox_targets_list, self.fpn_stride):
+ grid_cells = grid_cells.reshape([-1, 4])
+ cls_score = cls_score.transpose([0, 2, 3, 1]).reshape(
+ [-1, self.cls_out_channels])
+ bbox_pred = bbox_pred.transpose([0, 2, 3, 1]).reshape(
+ [-1, 4 * (self.reg_max + 1)])
+ bbox_targets = bbox_targets.reshape([-1, 4])
+ labels = labels.reshape([-1])
+ label_weights = label_weights.reshape([-1])
+
+ bg_class_ind = self.num_classes
+ pos_inds = paddle.nonzero(
+ paddle.logical_and((labels >= 0), (labels < bg_class_ind)),
+ as_tuple=False).squeeze(1)
+ score = np.zeros(labels.shape)
+ if len(pos_inds) > 0:
+ pos_bbox_targets = paddle.gather(bbox_targets, pos_inds, axis=0)
+ pos_bbox_pred = paddle.gather(bbox_pred, pos_inds, axis=0)
+ pos_grid_cells = paddle.gather(grid_cells, pos_inds, axis=0)
+ pos_grid_cell_centers = self._grid_cells_to_center(
+ pos_grid_cells) / stride
+
+ weight_targets = F.sigmoid(cls_score.detach())
+ weight_targets = paddle.gather(
+ weight_targets.max(axis=1, keepdim=True), pos_inds, axis=0)
+ pos_bbox_pred_corners = self.distribution_project(pos_bbox_pred)
+ pos_decode_bbox_pred = distance2bbox(pos_grid_cell_centers,
+ pos_bbox_pred_corners)
+ pos_decode_bbox_targets = pos_bbox_targets / stride
+ bbox_iou = bbox_overlaps(
+ pos_decode_bbox_pred.detach().numpy(),
+ pos_decode_bbox_targets.detach().numpy(),
+ is_aligned=True)
+ score[pos_inds.numpy()] = bbox_iou
+ pred_corners = pos_bbox_pred.reshape([-1, self.reg_max + 1])
+ target_corners = bbox2distance(pos_grid_cell_centers,
+ pos_decode_bbox_targets,
+ self.reg_max).reshape([-1])
+ # regression loss
+ loss_bbox = paddle.sum(
+ self.loss_bbox(pos_decode_bbox_pred,
+ pos_decode_bbox_targets) * weight_targets)
+
+ # dfl loss
+ loss_dfl = self.loss_dfl(
+ pred_corners,
+ target_corners,
+ weight=weight_targets.expand([-1, 4]).reshape([-1]),
+ avg_factor=4.0)
+ else:
+ loss_bbox = bbox_pred.sum() * 0
+ loss_dfl = bbox_pred.sum() * 0
+ weight_targets = paddle.to_tensor([0], dtype='float32')
+
+ # qfl loss
+ score = paddle.to_tensor(score)
+ loss_qfl = self.loss_qfl(
+ cls_score, (labels, score),
+ weight=label_weights,
+ avg_factor=num_total_pos)
+ loss_bbox_list.append(loss_bbox)
+ loss_dfl_list.append(loss_dfl)
+ loss_qfl_list.append(loss_qfl)
+ avg_factor.append(weight_targets.sum())
+
+ avg_factor = sum(avg_factor)
+ try:
+ avg_factor = paddle.distributed.all_reduce(avg_factor.clone())
+ avg_factor = paddle.clip(
+ avg_factor / paddle.distributed.get_world_size(), min=1)
+ except:
+ avg_factor = max(avg_factor.item(), 1)
+ if avg_factor <= 0:
+ loss_qfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
+ loss_bbox = paddle.to_tensor(
+ 0, dtype='float32', stop_gradient=False)
+ loss_dfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
+ else:
+ losses_bbox = list(map(lambda x: x / avg_factor, loss_bbox_list))
+ losses_dfl = list(map(lambda x: x / avg_factor, loss_dfl_list))
+ loss_qfl = sum(loss_qfl_list)
+ loss_bbox = sum(losses_bbox)
+ loss_dfl = sum(losses_dfl)
+
+ loss_states = dict(
+ loss_qfl=loss_qfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)
+
+ return loss_states
+
+ def get_single_level_center_point(self, featmap_size, stride,
+ cell_offset=0):
+ """
+ Generate pixel centers of a single stage feature map.
+ Args:
+ featmap_size: height and width of the feature map
+ stride: down sample stride of the feature map
+ Returns:
+ y and x of the center points
+ """
+ h, w = featmap_size
+ x_range = (paddle.arange(w, dtype='float32') + cell_offset) * stride
+ y_range = (paddle.arange(h, dtype='float32') + cell_offset) * stride
+ y, x = paddle.meshgrid(y_range, x_range)
+ y = y.flatten()
+ x = x.flatten()
+ return y, x
+
+ def get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ img_shape,
+ scale_factor,
+ rescale=True,
+ cell_offset=0):
+ assert len(cls_scores) == len(bbox_preds)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ for stride, cls_score, bbox_pred in zip(self.fpn_stride, cls_scores,
+ bbox_preds):
+ featmap_size = [
+ paddle.shape(cls_score)[0], paddle.shape(cls_score)[1]
+ ]
+ y, x = self.get_single_level_center_point(
+ featmap_size, stride, cell_offset=cell_offset)
+ center_points = paddle.stack([x, y], axis=-1)
+ scores = cls_score.reshape([-1, self.cls_out_channels])
+ bbox_pred = self.distribution_project(bbox_pred) * stride
+
+ if scores.shape[0] > self.nms_pre:
+ max_scores = scores.max(axis=1)
+ _, topk_inds = max_scores.topk(self.nms_pre)
+ center_points = center_points.gather(topk_inds)
+ bbox_pred = bbox_pred.gather(topk_inds)
+ scores = scores.gather(topk_inds)
+
+ bboxes = distance2bbox(
+ center_points, bbox_pred, max_shape=img_shape)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_bboxes = paddle.concat(mlvl_bboxes)
+ if rescale:
+ # [h_scale, w_scale] to [w_scale, h_scale, w_scale, h_scale]
+ im_scale = paddle.concat([scale_factor[::-1], scale_factor[::-1]])
+ mlvl_bboxes /= im_scale
+ mlvl_scores = paddle.concat(mlvl_scores)
+ mlvl_scores = mlvl_scores.transpose([1, 0])
+ return mlvl_bboxes, mlvl_scores
+
+ def decode(self, cls_scores, bbox_preds, im_shape, scale_factor,
+ cell_offset):
+ batch_bboxes = []
+ batch_scores = []
+ for img_id in range(cls_scores[0].shape[0]):
+ num_levels = len(cls_scores)
+ cls_score_list = [cls_scores[i][img_id] for i in range(num_levels)]
+ bbox_pred_list = [bbox_preds[i][img_id] for i in range(num_levels)]
+ bboxes, scores = self.get_bboxes_single(
+ cls_score_list,
+ bbox_pred_list,
+ im_shape[img_id],
+ scale_factor[img_id],
+ cell_offset=cell_offset)
+ batch_bboxes.append(bboxes)
+ batch_scores.append(scores)
+ batch_bboxes = paddle.stack(batch_bboxes, axis=0)
+ batch_scores = paddle.stack(batch_scores, axis=0)
+
+ return batch_bboxes, batch_scores
+
+ def post_process(self, gfl_head_outs, im_shape, scale_factor):
+ cls_scores, bboxes_reg = gfl_head_outs
+ bboxes, score = self.decode(cls_scores, bboxes_reg, im_shape,
+ scale_factor, self.cell_offset)
+ bbox_pred, bbox_num, _ = self.nms(bboxes, score)
+ return bbox_pred, bbox_num
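+
+
+# A hedged decode sketch (not from the original file): per level, DFL logits
+# are projected to pixel distances and turned into boxes around the cell
+# centers. With assumed values for a single point:
+#
+#     center = paddle.to_tensor([[32., 32.]])
+#     dist = paddle.to_tensor([[4., 4., 4., 4.]])  # l, t, r, b, already * stride
+#     distance2bbox(center, dist)                  # -> [[28., 28., 36., 36.]]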
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/keypoint_hrhrnet_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/keypoint_hrhrnet_head.py
new file mode 100644
index 000000000..869b1816e
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/keypoint_hrhrnet_head.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+
+from ppdet.core.workspace import register
+from .. import layers as L
+from ..backbones.hrnet import BasicBlock
+
+
+@register
+class HrHRNetHead(nn.Layer):
+ __inject__ = ['loss']
+
+ def __init__(self, num_joints, loss='HrHRNetLoss', swahr=False, width=32):
+ """
+ Head for HigherHRNet network
+
+ Args:
+ num_joints (int): number of keypoints
+            loss (object): HrHRNetLoss instance
+ swahr (bool): whether to use swahr
+ width (int): hrnet channel width
+ """
+ super(HrHRNetHead, self).__init__()
+ self.loss = loss
+
+ self.num_joints = num_joints
+ num_featout1 = num_joints * 2
+ num_featout2 = num_joints
+ self.swahr = swahr
+ self.conv1 = L.Conv2d(width, num_featout1, 1, 1, 0, bias=True)
+ self.conv2 = L.Conv2d(width, num_featout2, 1, 1, 0, bias=True)
+ self.deconv = nn.Sequential(
+ L.ConvTranspose2d(
+ num_featout1 + width, width, 4, 2, 1, 0, bias=False),
+ L.BatchNorm2d(width),
+ L.ReLU())
+ self.blocks = nn.Sequential(*(BasicBlock(
+ num_channels=width,
+ num_filters=width,
+ has_se=False,
+ freeze_norm=False,
+ name='HrHRNetHead_{}'.format(i)) for i in range(4)))
+
+ self.interpolate = L.Upsample(2, mode='bilinear')
+ self.concat = L.Concat(dim=1)
+ if swahr:
+ self.scalelayer0 = nn.Sequential(
+ L.Conv2d(
+ width, num_joints, 1, 1, 0, bias=True),
+ L.BatchNorm2d(num_joints),
+ L.ReLU(),
+ L.Conv2d(
+ num_joints,
+ num_joints,
+ 9,
+ 1,
+ 4,
+ groups=num_joints,
+ bias=True))
+ self.scalelayer1 = nn.Sequential(
+ L.Conv2d(
+ width, num_joints, 1, 1, 0, bias=True),
+ L.BatchNorm2d(num_joints),
+ L.ReLU(),
+ L.Conv2d(
+ num_joints,
+ num_joints,
+ 9,
+ 1,
+ 4,
+ groups=num_joints,
+ bias=True))
+
+ def forward(self, feats, targets=None):
+ x1 = feats[0]
+ xo1 = self.conv1(x1)
+ x2 = self.blocks(self.deconv(self.concat((x1, xo1))))
+ xo2 = self.conv2(x2)
+ num_joints = self.num_joints
+ if self.training:
+ heatmap1, tagmap = paddle.split(xo1, 2, axis=1)
+ if self.swahr:
+ so1 = self.scalelayer0(x1)
+ so2 = self.scalelayer1(x2)
+ hrhrnet_outputs = ([heatmap1, so1], [xo2, so2], tagmap)
+ return self.loss(hrhrnet_outputs, targets)
+ else:
+ hrhrnet_outputs = (heatmap1, xo2, tagmap)
+ return self.loss(hrhrnet_outputs, targets)
+
+ # averaged heatmap, upsampled tagmap
+ upsampled = self.interpolate(xo1)
+ avg = (upsampled[:, :num_joints] + xo2[:, :num_joints]) / 2
+ return avg, upsampled[:, num_joints:]
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/mask_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/mask_head.py
new file mode 100644
index 000000000..bfce2dc5b
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/mask_head.py
@@ -0,0 +1,250 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import KaimingNormal
+
+from ppdet.core.workspace import register, create
+from ppdet.modeling.layers import ConvNormLayer
+from .roi_extractor import RoIAlign
+
+
+@register
+class MaskFeat(nn.Layer):
+ """
+ Feature extraction in Mask head
+
+ Args:
+ in_channel (int): Input channels
+ out_channel (int): Output channels
+ num_convs (int): The number of conv layers, default 4
+ norm_type (string | None): Norm type, bn, gn, sync_bn are available,
+ default None
+ """
+
+ def __init__(self,
+ in_channel=256,
+ out_channel=256,
+ num_convs=4,
+ norm_type=None):
+ super(MaskFeat, self).__init__()
+ self.num_convs = num_convs
+ self.in_channel = in_channel
+ self.out_channel = out_channel
+ self.norm_type = norm_type
+ fan_conv = out_channel * 3 * 3
+ fan_deconv = out_channel * 2 * 2
+
+ mask_conv = nn.Sequential()
+ if norm_type == 'gn':
+ for i in range(self.num_convs):
+ conv_name = 'mask_inter_feat_{}'.format(i + 1)
+ mask_conv.add_sublayer(
+ conv_name,
+ ConvNormLayer(
+ ch_in=in_channel if i == 0 else out_channel,
+ ch_out=out_channel,
+ filter_size=3,
+ stride=1,
+ norm_type=self.norm_type,
+ initializer=KaimingNormal(fan_in=fan_conv),
+ skip_quant=True))
+ mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
+ else:
+ for i in range(self.num_convs):
+ conv_name = 'mask_inter_feat_{}'.format(i + 1)
+ conv = nn.Conv2D(
+ in_channels=in_channel if i == 0 else out_channel,
+ out_channels=out_channel,
+ kernel_size=3,
+ padding=1,
+ weight_attr=paddle.ParamAttr(
+ initializer=KaimingNormal(fan_in=fan_conv)))
+ conv.skip_quant = True
+ mask_conv.add_sublayer(conv_name, conv)
+ mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
+ mask_conv.add_sublayer(
+ 'conv5_mask',
+ nn.Conv2DTranspose(
+ in_channels=self.in_channel,
+ out_channels=self.out_channel,
+ kernel_size=2,
+ stride=2,
+ weight_attr=paddle.ParamAttr(
+ initializer=KaimingNormal(fan_in=fan_deconv))))
+ mask_conv.add_sublayer('conv5_mask' + 'act', nn.ReLU())
+ self.upsample = mask_conv
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ if isinstance(input_shape, (list, tuple)):
+ input_shape = input_shape[0]
+ return {'in_channel': input_shape.channels, }
+
+ def out_channels(self):
+ return self.out_channel
+
+ def forward(self, feats):
+ return self.upsample(feats)
+
+
+@register
+class MaskHead(nn.Layer):
+ __shared__ = ['num_classes']
+ __inject__ = ['mask_assigner']
+ """
+ RCNN mask head
+
+ Args:
+ head (nn.Layer): Extract feature in mask head
+ roi_extractor (object): The module of RoI Extractor
+ mask_assigner (object): The module of Mask Assigner,
+ label and sample the mask
+ num_classes (int): The number of classes
+ share_bbox_feat (bool): Whether to share the feature from bbox head,
+ default false
+ """
+
+ def __init__(self,
+ head,
+ roi_extractor=RoIAlign().__dict__,
+ mask_assigner='MaskAssigner',
+ num_classes=80,
+ share_bbox_feat=False):
+ super(MaskHead, self).__init__()
+ self.num_classes = num_classes
+
+ self.roi_extractor = roi_extractor
+ if isinstance(roi_extractor, dict):
+ self.roi_extractor = RoIAlign(**roi_extractor)
+ self.head = head
+ self.in_channels = head.out_channels()
+ self.mask_assigner = mask_assigner
+ self.share_bbox_feat = share_bbox_feat
+ self.bbox_head = None
+
+ self.mask_fcn_logits = nn.Conv2D(
+ in_channels=self.in_channels,
+ out_channels=self.num_classes,
+ kernel_size=1,
+ weight_attr=paddle.ParamAttr(initializer=KaimingNormal(
+ fan_in=self.num_classes)))
+ self.mask_fcn_logits.skip_quant = True
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ roi_pooler = cfg['roi_extractor']
+ assert isinstance(roi_pooler, dict)
+ kwargs = RoIAlign.from_config(cfg, input_shape)
+ roi_pooler.update(kwargs)
+ kwargs = {'input_shape': input_shape}
+ head = create(cfg['head'], **kwargs)
+ return {
+ 'roi_extractor': roi_pooler,
+ 'head': head,
+ }
+
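+    # An added note (shapes assumed): in get_loss below, one_hot + nonzero +
+    # gather_nd select, for each RoI, only the logit map of its ground-truth
+    # class, so binary cross-entropy is computed per-class:
+    #     mask_logits [R, num_classes, M, M] -> mask_pred [R, M, M]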
+ def get_loss(self, mask_logits, mask_label, mask_target, mask_weight):
+ mask_label = F.one_hot(mask_label, self.num_classes).unsqueeze([2, 3])
+ mask_label = paddle.expand_as(mask_label, mask_logits)
+ mask_label.stop_gradient = True
+ mask_pred = paddle.gather_nd(mask_logits, paddle.nonzero(mask_label))
+ shape = mask_logits.shape
+ mask_pred = paddle.reshape(mask_pred, [shape[0], shape[2], shape[3]])
+
+ mask_target = mask_target.cast('float32')
+ mask_weight = mask_weight.unsqueeze([1, 2])
+ loss_mask = F.binary_cross_entropy_with_logits(
+ mask_pred, mask_target, weight=mask_weight, reduction="mean")
+ return loss_mask
+
+ def forward_train(self, body_feats, rois, rois_num, inputs, targets,
+ bbox_feat):
+ """
+ body_feats (list[Tensor]): Multi-level backbone features
+ rois (list[Tensor]): Proposals for each batch with shape [N, 4]
+ rois_num (Tensor): The number of proposals for each batch
+ inputs (dict): ground truth info
+ """
+ tgt_labels, _, tgt_gt_inds = targets
+ rois, rois_num, tgt_classes, tgt_masks, mask_index, tgt_weights = self.mask_assigner(
+ rois, tgt_labels, tgt_gt_inds, inputs)
+
+ if self.share_bbox_feat:
+ rois_feat = paddle.gather(bbox_feat, mask_index)
+ else:
+ rois_feat = self.roi_extractor(body_feats, rois, rois_num)
+ mask_feat = self.head(rois_feat)
+ mask_logits = self.mask_fcn_logits(mask_feat)
+
+ loss_mask = self.get_loss(mask_logits, tgt_classes, tgt_masks,
+ tgt_weights)
+ return {'loss_mask': loss_mask}
+
+ def forward_test(self,
+ body_feats,
+ rois,
+ rois_num,
+ scale_factor,
+ feat_func=None):
+ """
+ body_feats (list[Tensor]): Multi-level backbone features
+ rois (Tensor): Prediction from bbox head with shape [N, 6]
+ rois_num (Tensor): The number of prediction for each batch
+ scale_factor (Tensor): The scale factor from origin size to input size
+ """
+ if rois.shape[0] == 0:
+ mask_out = paddle.full([1, 1, 1, 1], -1)
+ else:
+ bbox = [rois[:, 2:]]
+ labels = rois[:, 0].cast('int32')
+ rois_feat = self.roi_extractor(body_feats, bbox, rois_num)
+ if self.share_bbox_feat:
+ assert feat_func is not None
+ rois_feat = feat_func(rois_feat)
+
+ mask_feat = self.head(rois_feat)
+ mask_logit = self.mask_fcn_logits(mask_feat)
+ mask_num_class = mask_logit.shape[1]
+ if mask_num_class == 1:
+ mask_out = F.sigmoid(mask_logit)
+ else:
+ num_masks = mask_logit.shape[0]
+ mask_out = []
+ # TODO: need to optimize gather
+ for i in range(mask_logit.shape[0]):
+ pred_masks = paddle.unsqueeze(
+ mask_logit[i, :, :, :], axis=0)
+ mask = paddle.gather(pred_masks, labels[i], axis=1)
+ mask_out.append(mask)
+ mask_out = F.sigmoid(paddle.concat(mask_out))
+ return mask_out
+
+ def forward(self,
+ body_feats,
+ rois,
+ rois_num,
+ inputs,
+ targets=None,
+ bbox_feat=None,
+ feat_func=None):
+ if self.training:
+ return self.forward_train(body_feats, rois, rois_num, inputs,
+ targets, bbox_feat)
+ else:
+ im_scale = inputs['scale_factor']
+ return self.forward_test(body_feats, rois, rois_num, im_scale,
+ feat_func)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/pico_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/pico_head.py
new file mode 100644
index 000000000..7cfd24c3c
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/pico_head.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import numpy as np
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import Normal, Constant
+
+from ppdet.core.workspace import register
+from ppdet.modeling.layers import ConvNormLayer
+from .simota_head import OTAVFLHead
+
+
+@register
+class PicoFeat(nn.Layer):
+ """
+ PicoFeat of PicoDet
+
+ Args:
+ feat_in (int): The channel number of input Tensor.
+ feat_out (int): The channel number of output Tensor.
+        num_convs (int): The convolution number of the PicoFeat.
+ norm_type (str): Normalization type, 'bn'/'sync_bn'/'gn'.
+ """
+
+ def __init__(self,
+ feat_in=256,
+ feat_out=96,
+ num_fpn_stride=3,
+ num_convs=2,
+ norm_type='bn',
+ share_cls_reg=False,
+ act='hard_swish'):
+ super(PicoFeat, self).__init__()
+ self.num_convs = num_convs
+ self.norm_type = norm_type
+ self.share_cls_reg = share_cls_reg
+ self.act = act
+ self.cls_convs = []
+ self.reg_convs = []
+ for stage_idx in range(num_fpn_stride):
+ cls_subnet_convs = []
+ reg_subnet_convs = []
+ for i in range(self.num_convs):
+ in_c = feat_in if i == 0 else feat_out
+ cls_conv_dw = self.add_sublayer(
+ 'cls_conv_dw{}.{}'.format(stage_idx, i),
+ ConvNormLayer(
+ ch_in=in_c,
+ ch_out=feat_out,
+ filter_size=5,
+ stride=1,
+ groups=feat_out,
+ norm_type=norm_type,
+ bias_on=False,
+ lr_scale=2.))
+ cls_subnet_convs.append(cls_conv_dw)
+ cls_conv_pw = self.add_sublayer(
+ 'cls_conv_pw{}.{}'.format(stage_idx, i),
+ ConvNormLayer(
+ ch_in=in_c,
+ ch_out=feat_out,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ bias_on=False,
+ lr_scale=2.))
+ cls_subnet_convs.append(cls_conv_pw)
+
+ if not self.share_cls_reg:
+ reg_conv_dw = self.add_sublayer(
+ 'reg_conv_dw{}.{}'.format(stage_idx, i),
+ ConvNormLayer(
+ ch_in=in_c,
+ ch_out=feat_out,
+ filter_size=5,
+ stride=1,
+ groups=feat_out,
+ norm_type=norm_type,
+ bias_on=False,
+ lr_scale=2.))
+ reg_subnet_convs.append(reg_conv_dw)
+ reg_conv_pw = self.add_sublayer(
+ 'reg_conv_pw{}.{}'.format(stage_idx, i),
+ ConvNormLayer(
+ ch_in=in_c,
+ ch_out=feat_out,
+ filter_size=1,
+ stride=1,
+ norm_type=norm_type,
+ bias_on=False,
+ lr_scale=2.))
+ reg_subnet_convs.append(reg_conv_pw)
+ self.cls_convs.append(cls_subnet_convs)
+ self.reg_convs.append(reg_subnet_convs)
+
+ def act_func(self, x):
+ if self.act == "leaky_relu":
+ x = F.leaky_relu(x)
+ elif self.act == "hard_swish":
+ x = F.hardswish(x)
+ return x
+
+ def forward(self, fpn_feat, stage_idx):
+ assert stage_idx < len(self.cls_convs)
+ cls_feat = fpn_feat
+ reg_feat = fpn_feat
+ for i in range(len(self.cls_convs[stage_idx])):
+ cls_feat = self.act_func(self.cls_convs[stage_idx][i](cls_feat))
+ if not self.share_cls_reg:
+ reg_feat = self.act_func(self.reg_convs[stage_idx][i](reg_feat))
+ return cls_feat, reg_feat
+
+
+@register
+class PicoHead(OTAVFLHead):
+ """
+ PicoHead
+ Args:
+ conv_feat (object): Instance of 'PicoFeat'
+ num_classes (int): Number of classes
+ fpn_stride (list): The stride of each FPN Layer
+ prior_prob (float): Used to set the bias init for the class prediction layer
+ loss_class (object): Instance of VariFocalLoss.
+ loss_dfl (object): Instance of DistributionFocalLoss.
+ loss_bbox (object): Instance of bbox loss.
+ assigner (object): Instance of label assigner.
+        reg_max (int): Max value of the integral set :math:`{0, ..., reg_max}`
+            in QFL setting. Default: 16.
+ """
+ __inject__ = [
+ 'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',
+ 'assigner', 'nms'
+ ]
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ conv_feat='PicoFeat',
+ dgqp_module=None,
+ num_classes=80,
+ fpn_stride=[8, 16, 32],
+ prior_prob=0.01,
+ loss_class='VariFocalLoss',
+ loss_dfl='DistributionFocalLoss',
+ loss_bbox='GIoULoss',
+ assigner='SimOTAAssigner',
+ reg_max=16,
+ feat_in_chan=96,
+ nms=None,
+ nms_pre=1000,
+ cell_offset=0):
+ super(PicoHead, self).__init__(
+ conv_feat=conv_feat,
+ dgqp_module=dgqp_module,
+ num_classes=num_classes,
+ fpn_stride=fpn_stride,
+ prior_prob=prior_prob,
+ loss_class=loss_class,
+ loss_dfl=loss_dfl,
+ loss_bbox=loss_bbox,
+ assigner=assigner,
+ reg_max=reg_max,
+ feat_in_chan=feat_in_chan,
+ nms=nms,
+ nms_pre=nms_pre,
+ cell_offset=cell_offset)
+ self.conv_feat = conv_feat
+ self.num_classes = num_classes
+ self.fpn_stride = fpn_stride
+ self.prior_prob = prior_prob
+ self.loss_vfl = loss_class
+ self.loss_dfl = loss_dfl
+ self.loss_bbox = loss_bbox
+ self.assigner = assigner
+ self.reg_max = reg_max
+ self.feat_in_chan = feat_in_chan
+ self.nms = nms
+ self.nms_pre = nms_pre
+ self.cell_offset = cell_offset
+
+ self.use_sigmoid = self.loss_vfl.use_sigmoid
+ if self.use_sigmoid:
+ self.cls_out_channels = self.num_classes
+ else:
+ self.cls_out_channels = self.num_classes + 1
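+        # focal-loss style bias init: sigmoid(bias) == prior_prob, so the class
+        # branch starts out predicting a probability of prior_prob everywhere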
+ bias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)
+ # Clear the super class initialization
+ self.gfl_head_cls = None
+ self.gfl_head_reg = None
+ self.scales_regs = None
+
+ self.head_cls_list = []
+ self.head_reg_list = []
+ for i in range(len(fpn_stride)):
+ head_cls = self.add_sublayer(
+ "head_cls" + str(i),
+ nn.Conv2D(
+ in_channels=self.feat_in_chan,
+ out_channels=self.cls_out_channels + 4 * (self.reg_max + 1)
+ if self.conv_feat.share_cls_reg else self.cls_out_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=ParamAttr(
+ initializer=Constant(value=bias_init_value))))
+ self.head_cls_list.append(head_cls)
+ if not self.conv_feat.share_cls_reg:
+ head_reg = self.add_sublayer(
+ "head_reg" + str(i),
+ nn.Conv2D(
+ in_channels=self.feat_in_chan,
+ out_channels=4 * (self.reg_max + 1),
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=ParamAttr(initializer=Constant(value=0))))
+ self.head_reg_list.append(head_reg)
+
+ def forward(self, fpn_feats, deploy=False):
+ assert len(fpn_feats) == len(
+ self.fpn_stride
+ ), "The size of fpn_feats is not equal to size of fpn_stride"
+ cls_logits_list = []
+ bboxes_reg_list = []
+ for i, fpn_feat in enumerate(fpn_feats):
+ conv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat, i)
+ if self.conv_feat.share_cls_reg:
+ cls_logits = self.head_cls_list[i](conv_cls_feat)
+ cls_score, bbox_pred = paddle.split(
+ cls_logits,
+ [self.cls_out_channels, 4 * (self.reg_max + 1)],
+ axis=1)
+ else:
+ cls_score = self.head_cls_list[i](conv_cls_feat)
+ bbox_pred = self.head_reg_list[i](conv_reg_feat)
+
+ if self.dgqp_module:
+ quality_score = self.dgqp_module(bbox_pred)
+ cls_score = F.sigmoid(cls_score) * quality_score
+
+ if deploy:
+ # Now only supports batch size = 1 in deploy
+ # TODO(ygh): support batch size > 1
+ cls_score = F.sigmoid(cls_score).reshape(
+ [1, self.cls_out_channels, -1]).transpose([0, 2, 1])
+ bbox_pred = bbox_pred.reshape([1, (self.reg_max + 1) * 4,
+ -1]).transpose([0, 2, 1])
+ elif not self.training:
+ cls_score = F.sigmoid(cls_score.transpose([0, 2, 3, 1]))
+ bbox_pred = bbox_pred.transpose([0, 2, 3, 1])
+
+ cls_logits_list.append(cls_score)
+ bboxes_reg_list.append(bbox_pred)
+
+ return (cls_logits_list, bboxes_reg_list)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/roi_extractor.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/roi_extractor.py
new file mode 100644
index 000000000..35c3924e3
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/roi_extractor.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+from ppdet.core.workspace import register
+from ppdet.modeling import ops
+
+
+def _to_list(v):
+ if not isinstance(v, (list, tuple)):
+ return [v]
+ return v
+
+
+@register
+class RoIAlign(object):
+ """
+ RoI Align module
+
+    For more details, please refer to the documentation of roi_align
+    in ppdet/modeling/ops.py
+
+ Args:
+ resolution (int): The output size, default 14
+ spatial_scale (float): Multiplicative spatial scale factor to translate
+ ROI coords from their input scale to the scale used when pooling.
+ default 0.0625
+ sampling_ratio (int): The number of sampling points in the interpolation
+ grid, default 0
+        canconical_level (int): The canonical FPN level used when mapping
+            RoIs to feature levels. default 4
+        canonical_size (int): The canonical box scale corresponding to
+            canconical_level. default 224
+ start_level (int): The start level of FPN layer to extract RoI feature,
+ default 0
+ end_level (int): The end level of FPN layer to extract RoI feature,
+ default 3
+ aligned (bool): Whether to add offset to rois' coord in roi_align.
+ default false
+ """
+
+ def __init__(self,
+ resolution=14,
+ spatial_scale=0.0625,
+ sampling_ratio=0,
+ canconical_level=4,
+ canonical_size=224,
+ start_level=0,
+ end_level=3,
+ aligned=False):
+ super(RoIAlign, self).__init__()
+ self.resolution = resolution
+ self.spatial_scale = _to_list(spatial_scale)
+ self.sampling_ratio = sampling_ratio
+ self.canconical_level = canconical_level
+ self.canonical_size = canonical_size
+ self.start_level = start_level
+ self.end_level = end_level
+ self.aligned = aligned
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'spatial_scale': [1. / i.stride for i in input_shape]}
+
+ def __call__(self, feats, roi, rois_num):
+ roi = paddle.concat(roi) if len(roi) > 1 else roi[0]
+ if len(feats) == 1:
+ rois_feat = ops.roi_align(
+ feats[self.start_level],
+ roi,
+ self.resolution,
+ self.spatial_scale[0],
+ rois_num=rois_num,
+ aligned=self.aligned)
+ else:
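+            # multi-level case: assign each RoI to an FPN level by its scale,
+            # pool features per level, then restore the original RoI order
+            # via restore_index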
+ offset = 2
+ k_min = self.start_level + offset
+ k_max = self.end_level + offset
+ rois_dist, restore_index, rois_num_dist = ops.distribute_fpn_proposals(
+ roi,
+ k_min,
+ k_max,
+ self.canconical_level,
+ self.canonical_size,
+ rois_num=rois_num)
+ rois_feat_list = []
+ for lvl in range(self.start_level, self.end_level + 1):
+ roi_feat = ops.roi_align(
+ feats[lvl],
+ rois_dist[lvl],
+ self.resolution,
+ self.spatial_scale[lvl],
+ sampling_ratio=self.sampling_ratio,
+ rois_num=rois_num_dist[lvl],
+ aligned=self.aligned)
+ rois_feat_list.append(roi_feat)
+ rois_feat_shuffle = paddle.concat(rois_feat_list)
+ rois_feat = paddle.gather(rois_feat_shuffle, restore_index)
+
+ return rois_feat
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/s2anet_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/s2anet_head.py
new file mode 100644
index 000000000..7910379c4
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/s2anet_head.py
@@ -0,0 +1,1048 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/models/anchor_heads_rotated/s2anet_head.py
+
+import paddle
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import Normal, Constant
+from ppdet.core.workspace import register
+from ppdet.modeling import ops
+from ppdet.modeling import bbox_utils
+from ppdet.modeling.proposal_generator.target_layer import RBoxAssigner
+import numpy as np
+import sys
+
+
+class S2ANetAnchorGenerator(nn.Layer):
+ """
+ AnchorGenerator by paddle
+ """
+
+ def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):
+ super(S2ANetAnchorGenerator, self).__init__()
+ self.base_size = base_size
+ self.scales = paddle.to_tensor(scales)
+ self.ratios = paddle.to_tensor(ratios)
+ self.scale_major = scale_major
+ self.ctr = ctr
+ self.base_anchors = self.gen_base_anchors()
+
+ @property
+ def num_base_anchors(self):
+ return self.base_anchors.shape[0]
+
+ def gen_base_anchors(self):
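+        # Enumerate every (scale, aspect-ratio) combination of base_size and
+        # emit rounded (x1, y1, x2, y2) anchors centered on a single cell.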
+ w = self.base_size
+ h = self.base_size
+ if self.ctr is None:
+ x_ctr = 0.5 * (w - 1)
+ y_ctr = 0.5 * (h - 1)
+ else:
+ x_ctr, y_ctr = self.ctr
+
+ h_ratios = paddle.sqrt(self.ratios)
+ w_ratios = 1 / h_ratios
+ if self.scale_major:
+ ws = (w * w_ratios[:] * self.scales[:]).reshape([-1])
+ hs = (h * h_ratios[:] * self.scales[:]).reshape([-1])
+ else:
+ ws = (w * self.scales[:] * w_ratios[:]).reshape([-1])
+ hs = (h * self.scales[:] * h_ratios[:]).reshape([-1])
+
+ base_anchors = paddle.stack(
+ [
+ x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
+ x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
+ ],
+ axis=-1)
+ base_anchors = paddle.round(base_anchors)
+ return base_anchors
+
+ def _meshgrid(self, x, y, row_major=True):
+ yy, xx = paddle.meshgrid(y, x)
+ yy = yy.reshape([-1])
+ xx = xx.reshape([-1])
+ if row_major:
+ return xx, yy
+ else:
+ return yy, xx
+
+ def forward(self, featmap_size, stride=16):
+        # multiply the featmap grid by stride to project it back onto the original image
+
+ feat_h = featmap_size[0]
+ feat_w = featmap_size[1]
+ shift_x = paddle.arange(0, feat_w, 1, 'int32') * stride
+ shift_y = paddle.arange(0, feat_h, 1, 'int32') * stride
+ shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
+ shifts = paddle.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1)
+
+ all_anchors = self.base_anchors[:, :] + shifts[:, :]
+ all_anchors = all_anchors.reshape([feat_h * feat_w, 4])
+ return all_anchors
+
+ def valid_flags(self, featmap_size, valid_size):
+ feat_h, feat_w = featmap_size
+ valid_h, valid_w = valid_size
+ assert valid_h <= feat_h and valid_w <= feat_w
+ valid_x = paddle.zeros([feat_w], dtype='int32')
+ valid_y = paddle.zeros([feat_h], dtype='int32')
+ valid_x[:valid_w] = 1
+ valid_y[:valid_h] = 1
+ valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
+ valid = valid_xx & valid_yy
+ valid = paddle.reshape(valid, [-1, 1])
+ valid = paddle.expand(valid, [-1, self.num_base_anchors]).reshape([-1])
+ return valid
+
+
+class AlignConv(nn.Layer):
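+    """Deformable conv whose offsets align sampling locations with rotated anchors."""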
+ def __init__(self, in_channels, out_channels, kernel_size=3, groups=1):
+ super(AlignConv, self).__init__()
+ self.kernel_size = kernel_size
+ self.align_conv = paddle.vision.ops.DeformConv2D(
+ in_channels,
+ out_channels,
+ kernel_size=self.kernel_size,
+ padding=(self.kernel_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
+ bias_attr=None)
+
+ @paddle.no_grad()
+ def get_offset(self, anchors, featmap_size, stride):
+ """
+ Args:
+ anchors: [M,5] xc,yc,w,h,angle
+ featmap_size: (feat_h, feat_w)
+ stride: 8
+ Returns:
+
+ """
+ anchors = paddle.reshape(anchors, [-1, 5]) # (NA,5)
+ dtype = anchors.dtype
+ feat_h = featmap_size[0]
+ feat_w = featmap_size[1]
+ pad = (self.kernel_size - 1) // 2
+ idx = paddle.arange(-pad, pad + 1, dtype=dtype)
+
+ yy, xx = paddle.meshgrid(idx, idx)
+ xx = paddle.reshape(xx, [-1])
+ yy = paddle.reshape(yy, [-1])
+
+ # get sampling locations of default conv
+ xc = paddle.arange(0, feat_w, dtype=dtype)
+ yc = paddle.arange(0, feat_h, dtype=dtype)
+ yc, xc = paddle.meshgrid(yc, xc)
+
+ xc = paddle.reshape(xc, [-1, 1])
+ yc = paddle.reshape(yc, [-1, 1])
+ x_conv = xc + xx
+ y_conv = yc + yy
+
+ # get sampling locations of anchors
+ # x_ctr, y_ctr, w, h, a = np.unbind(anchors, dim=1)
+ x_ctr = anchors[:, 0]
+ y_ctr = anchors[:, 1]
+ w = anchors[:, 2]
+ h = anchors[:, 3]
+ a = anchors[:, 4]
+
+ x_ctr = paddle.reshape(x_ctr, [-1, 1])
+ y_ctr = paddle.reshape(y_ctr, [-1, 1])
+ w = paddle.reshape(w, [-1, 1])
+ h = paddle.reshape(h, [-1, 1])
+ a = paddle.reshape(a, [-1, 1])
+
+ x_ctr = x_ctr / stride
+ y_ctr = y_ctr / stride
+ w_s = w / stride
+ h_s = h / stride
+ cos, sin = paddle.cos(a), paddle.sin(a)
+ dw, dh = w_s / self.kernel_size, h_s / self.kernel_size
+ x, y = dw * xx, dh * yy
+ xr = cos * x - sin * y
+ yr = sin * x + cos * y
+ x_anchor, y_anchor = xr + x_ctr, yr + y_ctr
+        # get offset field
+ offset_x = x_anchor - x_conv
+ offset_y = y_anchor - y_conv
+ offset = paddle.stack([offset_y, offset_x], axis=-1)
+ offset = paddle.reshape(
+ offset, [feat_h * feat_w, self.kernel_size * self.kernel_size * 2])
+ offset = paddle.transpose(offset, [1, 0])
+ offset = paddle.reshape(
+ offset,
+ [1, self.kernel_size * self.kernel_size * 2, feat_h, feat_w])
+ return offset
+
+ def forward(self, x, refine_anchors, featmap_size, stride):
+ offset = self.get_offset(refine_anchors, featmap_size, stride)
+ x = F.relu(self.align_conv(x, offset))
+ return x
+
+
+@register
+class S2ANetHead(nn.Layer):
+ """
+ S2Anet head
+ Args:
+ stacked_convs (int): number of stacked_convs
+ feat_in (int): input channels of feat
+ feat_out (int): output channels of feat
+ num_classes (int): num_classes
+ anchor_strides (list): stride of anchors
+ anchor_scales (list): scale of anchors
+ anchor_ratios (list): ratios of anchors
+ target_means (list): target_means
+ target_stds (list): target_stds
+        align_conv_type (str): one of ['AlignConv', 'Conv', 'DCN']
+ align_conv_size (int): kernel size of align_conv
+ use_sigmoid_cls (bool): use sigmoid_cls or not
+ reg_loss_weight (list): loss weight for regression
+ """
+ __shared__ = ['num_classes']
+ __inject__ = ['anchor_assign']
+
+ def __init__(self,
+ stacked_convs=2,
+ feat_in=256,
+ feat_out=256,
+ num_classes=15,
+ anchor_strides=[8, 16, 32, 64, 128],
+ anchor_scales=[4],
+ anchor_ratios=[1.0],
+ target_means=0.0,
+ target_stds=1.0,
+ align_conv_type='AlignConv',
+ align_conv_size=3,
+ use_sigmoid_cls=True,
+ anchor_assign=RBoxAssigner().__dict__,
+ reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.1],
+ cls_loss_weight=[1.1, 1.05],
+ reg_loss_type='l1'):
+ super(S2ANetHead, self).__init__()
+ self.stacked_convs = stacked_convs
+ self.feat_in = feat_in
+ self.feat_out = feat_out
+ self.anchor_list = None
+ self.anchor_scales = anchor_scales
+ self.anchor_ratios = anchor_ratios
+        self.anchor_strides = paddle.to_tensor(anchor_strides)
+ self.anchor_base_sizes = list(anchor_strides)
+ self.means = paddle.ones(shape=[5]) * target_means
+ self.stds = paddle.ones(shape=[5]) * target_stds
+ assert align_conv_type in ['AlignConv', 'Conv', 'DCN']
+ self.align_conv_type = align_conv_type
+ self.align_conv_size = align_conv_size
+
+ self.use_sigmoid_cls = use_sigmoid_cls
+ self.cls_out_channels = num_classes if self.use_sigmoid_cls else 1
+ self.sampling = False
+ self.anchor_assign = anchor_assign
+ self.reg_loss_weight = reg_loss_weight
+ self.cls_loss_weight = cls_loss_weight
+ self.alpha = 1.0
+ self.beta = 1.0
+ self.reg_loss_type = reg_loss_type
+ self.s2anet_head_out = None
+
+ # anchor
+ self.anchor_generators = []
+ for anchor_base in self.anchor_base_sizes:
+ self.anchor_generators.append(
+ S2ANetAnchorGenerator(anchor_base, anchor_scales,
+ anchor_ratios))
+
+ self.anchor_generators = nn.LayerList(self.anchor_generators)
+ self.fam_cls_convs = nn.Sequential()
+ self.fam_reg_convs = nn.Sequential()
+
+ for i in range(self.stacked_convs):
+ chan_in = self.feat_in if i == 0 else self.feat_out
+
+ self.fam_cls_convs.add_sublayer(
+ 'fam_cls_conv_{}'.format(i),
+ nn.Conv2D(
+ in_channels=chan_in,
+ out_channels=self.feat_out,
+ kernel_size=3,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=ParamAttr(initializer=Constant(0))))
+
+ self.fam_cls_convs.add_sublayer('fam_cls_conv_{}_act'.format(i),
+ nn.ReLU())
+
+ self.fam_reg_convs.add_sublayer(
+ 'fam_reg_conv_{}'.format(i),
+ nn.Conv2D(
+ in_channels=chan_in,
+ out_channels=self.feat_out,
+ kernel_size=3,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=ParamAttr(initializer=Constant(0))))
+
+ self.fam_reg_convs.add_sublayer('fam_reg_conv_{}_act'.format(i),
+ nn.ReLU())
+
+ self.fam_reg = nn.Conv2D(
+ self.feat_out,
+ 5,
+ 1,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=ParamAttr(initializer=Constant(0)))
+ prior_prob = 0.01
+ bias_init = float(-np.log((1 - prior_prob) / prior_prob))
+ self.fam_cls = nn.Conv2D(
+ self.feat_out,
+ self.cls_out_channels,
+ 1,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=ParamAttr(initializer=Constant(bias_init)))
+
+ if self.align_conv_type == "AlignConv":
+ self.align_conv = AlignConv(self.feat_out, self.feat_out,
+ self.align_conv_size)
+ elif self.align_conv_type == "Conv":
+ self.align_conv = nn.Conv2D(
+ self.feat_out,
+ self.feat_out,
+ self.align_conv_size,
+ padding=(self.align_conv_size - 1) // 2,
+ bias_attr=ParamAttr(initializer=Constant(0)))
+
+ elif self.align_conv_type == "DCN":
+ self.align_conv_offset = nn.Conv2D(
+ self.feat_out,
+ 2 * self.align_conv_size**2,
+ 1,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=ParamAttr(initializer=Constant(0)))
+
+ self.align_conv = paddle.vision.ops.DeformConv2D(
+ self.feat_out,
+ self.feat_out,
+ self.align_conv_size,
+ padding=(self.align_conv_size - 1) // 2,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=False)
+
+ self.or_conv = nn.Conv2D(
+ self.feat_out,
+ self.feat_out,
+ kernel_size=3,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=ParamAttr(initializer=Constant(0)))
+
+ # ODM
+ self.odm_cls_convs = nn.Sequential()
+ self.odm_reg_convs = nn.Sequential()
+
+ for i in range(self.stacked_convs):
+ ch_in = self.feat_out
+ # ch_in = int(self.feat_out / 8) if i == 0 else self.feat_out
+
+ self.odm_cls_convs.add_sublayer(
+ 'odm_cls_conv_{}'.format(i),
+ nn.Conv2D(
+ in_channels=ch_in,
+ out_channels=self.feat_out,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=ParamAttr(initializer=Constant(0))))
+
+ self.odm_cls_convs.add_sublayer('odm_cls_conv_{}_act'.format(i),
+ nn.ReLU())
+
+ self.odm_reg_convs.add_sublayer(
+ 'odm_reg_conv_{}'.format(i),
+ nn.Conv2D(
+ in_channels=self.feat_out,
+ out_channels=self.feat_out,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=ParamAttr(initializer=Constant(0))))
+
+ self.odm_reg_convs.add_sublayer('odm_reg_conv_{}_act'.format(i),
+ nn.ReLU())
+
+ self.odm_cls = nn.Conv2D(
+ self.feat_out,
+ self.cls_out_channels,
+ 3,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=ParamAttr(initializer=Constant(bias_init)))
+ self.odm_reg = nn.Conv2D(
+ self.feat_out,
+ 5,
+ 3,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
+ bias_attr=ParamAttr(initializer=Constant(0)))
+
+ self.featmap_sizes = []
+ self.base_anchors_list = []
+ self.refine_anchor_list = []
+
+ def forward(self, feats):
+ fam_reg_branch_list = []
+ fam_cls_branch_list = []
+
+ odm_reg_branch_list = []
+ odm_cls_branch_list = []
+
+ self.featmap_sizes_list = []
+ self.base_anchors_list = []
+ self.refine_anchor_list = []
+
+ for feat_idx in range(len(feats)):
+ feat = feats[feat_idx]
+ fam_cls_feat = self.fam_cls_convs(feat)
+
+ fam_cls = self.fam_cls(fam_cls_feat)
+ # [N, CLS, H, W] --> [N, H, W, CLS]
+ fam_cls = fam_cls.transpose([0, 2, 3, 1])
+ fam_cls_reshape = paddle.reshape(
+ fam_cls, [fam_cls.shape[0], -1, self.cls_out_channels])
+ fam_cls_branch_list.append(fam_cls_reshape)
+
+ fam_reg_feat = self.fam_reg_convs(feat)
+
+ fam_reg = self.fam_reg(fam_reg_feat)
+ # [N, 5, H, W] --> [N, H, W, 5]
+ fam_reg = fam_reg.transpose([0, 2, 3, 1])
+ fam_reg_reshape = paddle.reshape(fam_reg, [fam_reg.shape[0], -1, 5])
+ fam_reg_branch_list.append(fam_reg_reshape)
+
+ # prepare anchor
+ featmap_size = (paddle.shape(feat)[2], paddle.shape(feat)[3])
+ self.featmap_sizes_list.append(featmap_size)
+ init_anchors = self.anchor_generators[feat_idx](
+ featmap_size, self.anchor_strides[feat_idx])
+
+ init_anchors = paddle.to_tensor(init_anchors, dtype='float32')
+ NA = featmap_size[0] * featmap_size[1]
+ init_anchors = paddle.reshape(init_anchors, [NA, 4])
+ init_anchors = self.rect2rbox(init_anchors)
+ self.base_anchors_list.append(init_anchors)
+
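+            # decode the FAM regression into refined anchors; detach during
+            # training so ODM target generation does not backprop through FAM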
+ if self.training:
+ refine_anchor = self.bbox_decode(fam_reg.detach(), init_anchors)
+ else:
+ refine_anchor = self.bbox_decode(fam_reg, init_anchors)
+
+ self.refine_anchor_list.append(refine_anchor)
+
+ if self.align_conv_type == 'AlignConv':
+ align_feat = self.align_conv(feat,
+ refine_anchor.clone(),
+ featmap_size,
+ self.anchor_strides[feat_idx])
+ elif self.align_conv_type == 'DCN':
+ align_offset = self.align_conv_offset(feat)
+ align_feat = self.align_conv(feat, align_offset)
+ elif self.align_conv_type == 'Conv':
+ align_feat = self.align_conv(feat)
+
+ or_feat = self.or_conv(align_feat)
+ odm_reg_feat = or_feat
+ odm_cls_feat = or_feat
+
+ odm_reg_feat = self.odm_reg_convs(odm_reg_feat)
+ odm_cls_feat = self.odm_cls_convs(odm_cls_feat)
+
+ odm_cls_score = self.odm_cls(odm_cls_feat)
+ # [N, CLS, H, W] --> [N, H, W, CLS]
+ odm_cls_score = odm_cls_score.transpose([0, 2, 3, 1])
+ odm_cls_score_shape = odm_cls_score.shape
+ odm_cls_score_reshape = paddle.reshape(odm_cls_score, [
+ odm_cls_score_shape[0], odm_cls_score_shape[1] *
+ odm_cls_score_shape[2], self.cls_out_channels
+ ])
+
+ odm_cls_branch_list.append(odm_cls_score_reshape)
+
+ odm_bbox_pred = self.odm_reg(odm_reg_feat)
+ # [N, 5, H, W] --> [N, H, W, 5]
+ odm_bbox_pred = odm_bbox_pred.transpose([0, 2, 3, 1])
+ odm_bbox_pred_reshape = paddle.reshape(odm_bbox_pred, [-1, 5])
+ odm_bbox_pred_reshape = paddle.unsqueeze(
+ odm_bbox_pred_reshape, axis=0)
+ odm_reg_branch_list.append(odm_bbox_pred_reshape)
+
+ self.s2anet_head_out = (fam_cls_branch_list, fam_reg_branch_list,
+ odm_cls_branch_list, odm_reg_branch_list)
+ return self.s2anet_head_out
+
+ def get_prediction(self, nms_pre=2000):
+ refine_anchors = self.refine_anchor_list
+ fam_cls_branch_list = self.s2anet_head_out[0]
+ fam_reg_branch_list = self.s2anet_head_out[1]
+ odm_cls_branch_list = self.s2anet_head_out[2]
+ odm_reg_branch_list = self.s2anet_head_out[3]
+ pred_scores, pred_bboxes = self.get_bboxes(
+ odm_cls_branch_list, odm_reg_branch_list, refine_anchors, nms_pre,
+ self.cls_out_channels, self.use_sigmoid_cls)
+ return pred_scores, pred_bboxes
+
+ def smooth_l1_loss(self, pred, label, delta=1.0 / 9.0):
+ """
+ Args:
+ pred: pred score
+ label: label
+ delta: delta
+ Returns: loss
+ """
+ assert pred.shape == label.shape and label.numel() > 0
+ assert delta > 0
+ diff = paddle.abs(pred - label)
+ loss = paddle.where(diff < delta, 0.5 * diff * diff / delta,
+ diff - 0.5 * delta)
+ return loss
+
+ def get_fam_loss(self, fam_target, s2anet_head_out, reg_loss_type='gwd'):
+ (labels, label_weights, bbox_targets, bbox_weights, bbox_gt_bboxes,
+ pos_inds, neg_inds) = fam_target
+ fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = s2anet_head_out
+
+ fam_cls_losses = []
+ fam_bbox_losses = []
+ st_idx = 0
+ num_total_samples = len(pos_inds) + len(
+ neg_inds) if self.sampling else len(pos_inds)
+ num_total_samples = max(1, num_total_samples)
+
+ for idx, feat_size in enumerate(self.featmap_sizes_list):
+ feat_anchor_num = feat_size[0] * feat_size[1]
+
+ # step1: get data
+ feat_labels = labels[st_idx:st_idx + feat_anchor_num]
+ feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num]
+
+ feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :]
+ feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :]
+
+ # step2: calc cls loss
+ feat_labels = feat_labels.reshape(-1)
+ feat_label_weights = feat_label_weights.reshape(-1)
+
+ fam_cls_score = fam_cls_branch_list[idx]
+ fam_cls_score = paddle.squeeze(fam_cls_score, axis=0)
+ fam_cls_score1 = fam_cls_score
+
+ feat_labels = paddle.to_tensor(feat_labels)
+ feat_labels_one_hot = paddle.nn.functional.one_hot(
+ feat_labels, self.cls_out_channels + 1)
+ feat_labels_one_hot = feat_labels_one_hot[:, 1:]
+ feat_labels_one_hot.stop_gradient = True
+
+ num_total_samples = paddle.to_tensor(
+ num_total_samples, dtype='float32', stop_gradient=True)
+
+ fam_cls = F.sigmoid_focal_loss(
+ fam_cls_score1,
+ feat_labels_one_hot,
+ normalizer=num_total_samples,
+ reduction='none')
+
+ feat_label_weights = feat_label_weights.reshape(
+ feat_label_weights.shape[0], 1)
+ feat_label_weights = np.repeat(
+ feat_label_weights, self.cls_out_channels, axis=1)
+ feat_label_weights = paddle.to_tensor(
+ feat_label_weights, stop_gradient=True)
+
+ fam_cls = fam_cls * feat_label_weights
+ fam_cls_total = paddle.sum(fam_cls)
+ fam_cls_losses.append(fam_cls_total)
+
+ # step3: regression loss
+ feat_bbox_targets = paddle.to_tensor(
+ feat_bbox_targets, dtype='float32', stop_gradient=True)
+ feat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5])
+
+ fam_bbox_pred = fam_reg_branch_list[idx]
+ fam_bbox_pred = paddle.squeeze(fam_bbox_pred, axis=0)
+ fam_bbox_pred = paddle.reshape(fam_bbox_pred, [-1, 5])
+ fam_bbox = self.smooth_l1_loss(fam_bbox_pred, feat_bbox_targets)
+ loss_weight = paddle.to_tensor(
+ self.reg_loss_weight, dtype='float32', stop_gradient=True)
+ fam_bbox = paddle.multiply(fam_bbox, loss_weight)
+ feat_bbox_weights = paddle.to_tensor(
+ feat_bbox_weights, stop_gradient=True)
+
+ if reg_loss_type == 'l1':
+ fam_bbox = fam_bbox * feat_bbox_weights
+ fam_bbox_total = paddle.sum(fam_bbox) / num_total_samples
+ elif reg_loss_type == 'iou' or reg_loss_type == 'gwd':
+ fam_bbox = paddle.sum(fam_bbox, axis=-1)
+ feat_bbox_weights = paddle.sum(feat_bbox_weights, axis=-1)
+ try:
+ from rbox_iou_ops import rbox_iou
+ except Exception as e:
+ print("import custom_ops error, try install rbox_iou_ops " \
+ "following ppdet/ext_op/README.md", e)
+ sys.stdout.flush()
+ sys.exit(-1)
+ # calc iou
+ fam_bbox_decode = self.delta2rbox(self.base_anchors_list[idx],
+ fam_bbox_pred)
+ bbox_gt_bboxes = paddle.to_tensor(
+ bbox_gt_bboxes,
+ dtype=fam_bbox_decode.dtype,
+ place=fam_bbox_decode.place)
+ bbox_gt_bboxes.stop_gradient = True
+ iou = rbox_iou(fam_bbox_decode, bbox_gt_bboxes)
+ iou = paddle.diag(iou)
+
+ if reg_loss_type == 'gwd':
+ bbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx +
+ feat_anchor_num, :]
+ fam_bbox_total = self.gwd_loss(fam_bbox_decode,
+ bbox_gt_bboxes_level)
+ fam_bbox_total = fam_bbox_total * feat_bbox_weights
+ fam_bbox_total = paddle.sum(
+ fam_bbox_total) / num_total_samples
+
+ fam_bbox_losses.append(fam_bbox_total)
+ st_idx += feat_anchor_num
+
+ fam_cls_loss = paddle.add_n(fam_cls_losses)
+ fam_cls_loss_weight = paddle.to_tensor(
+ self.cls_loss_weight[0], dtype='float32', stop_gradient=True)
+ fam_cls_loss = fam_cls_loss * fam_cls_loss_weight
+ fam_reg_loss = paddle.add_n(fam_bbox_losses)
+ return fam_cls_loss, fam_reg_loss
+
+ def get_odm_loss(self, odm_target, s2anet_head_out, reg_loss_type='gwd'):
+ (labels, label_weights, bbox_targets, bbox_weights, bbox_gt_bboxes,
+ pos_inds, neg_inds) = odm_target
+ fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = s2anet_head_out
+
+ odm_cls_losses = []
+ odm_bbox_losses = []
+ st_idx = 0
+ num_total_samples = len(pos_inds) + len(
+ neg_inds) if self.sampling else len(pos_inds)
+ num_total_samples = max(1, num_total_samples)
+
+ for idx, feat_size in enumerate(self.featmap_sizes_list):
+ feat_anchor_num = feat_size[0] * feat_size[1]
+
+ # step1: get data
+ feat_labels = labels[st_idx:st_idx + feat_anchor_num]
+ feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num]
+
+ feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :]
+ feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :]
+
+ # step2: calc cls loss
+ feat_labels = feat_labels.reshape(-1)
+ feat_label_weights = feat_label_weights.reshape(-1)
+
+ odm_cls_score = odm_cls_branch_list[idx]
+ odm_cls_score = paddle.squeeze(odm_cls_score, axis=0)
+ odm_cls_score1 = odm_cls_score
+
+ feat_labels = paddle.to_tensor(feat_labels)
+ feat_labels_one_hot = paddle.nn.functional.one_hot(
+ feat_labels, self.cls_out_channels + 1)
+ feat_labels_one_hot = feat_labels_one_hot[:, 1:]
+ feat_labels_one_hot.stop_gradient = True
+
+ num_total_samples = paddle.to_tensor(
+ num_total_samples, dtype='float32', stop_gradient=True)
+ odm_cls = F.sigmoid_focal_loss(
+ odm_cls_score1,
+ feat_labels_one_hot,
+ normalizer=num_total_samples,
+ reduction='none')
+
+ feat_label_weights = feat_label_weights.reshape(
+ feat_label_weights.shape[0], 1)
+ feat_label_weights = np.repeat(
+ feat_label_weights, self.cls_out_channels, axis=1)
+ feat_label_weights = paddle.to_tensor(feat_label_weights)
+ feat_label_weights.stop_gradient = True
+
+ odm_cls = odm_cls * feat_label_weights
+ odm_cls_total = paddle.sum(odm_cls)
+ odm_cls_losses.append(odm_cls_total)
+
+ # # step3: regression loss
+ feat_bbox_targets = paddle.to_tensor(
+ feat_bbox_targets, dtype='float32')
+ feat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5])
+ feat_bbox_targets.stop_gradient = True
+
+ odm_bbox_pred = odm_reg_branch_list[idx]
+ odm_bbox_pred = paddle.squeeze(odm_bbox_pred, axis=0)
+ odm_bbox_pred = paddle.reshape(odm_bbox_pred, [-1, 5])
+ odm_bbox = self.smooth_l1_loss(odm_bbox_pred, feat_bbox_targets)
+
+ loss_weight = paddle.to_tensor(
+ self.reg_loss_weight, dtype='float32', stop_gradient=True)
+ odm_bbox = paddle.multiply(odm_bbox, loss_weight)
+ feat_bbox_weights = paddle.to_tensor(
+ feat_bbox_weights, stop_gradient=True)
+
+ if reg_loss_type == 'l1':
+ odm_bbox = odm_bbox * feat_bbox_weights
+ odm_bbox_total = paddle.sum(odm_bbox) / num_total_samples
+ elif reg_loss_type == 'iou' or reg_loss_type == 'gwd':
+ odm_bbox = paddle.sum(odm_bbox, axis=-1)
+ feat_bbox_weights = paddle.sum(feat_bbox_weights, axis=-1)
+ try:
+ from rbox_iou_ops import rbox_iou
+ except Exception as e:
+ print("import custom_ops error, try install rbox_iou_ops " \
+ "following ppdet/ext_op/README.md", e)
+ sys.stdout.flush()
+ sys.exit(-1)
+ # calc iou
+ odm_bbox_decode = self.delta2rbox(self.refine_anchor_list[idx],
+ odm_bbox_pred)
+ bbox_gt_bboxes = paddle.to_tensor(
+ bbox_gt_bboxes,
+ dtype=odm_bbox_decode.dtype,
+ place=odm_bbox_decode.place)
+ bbox_gt_bboxes.stop_gradient = True
+ iou = rbox_iou(odm_bbox_decode, bbox_gt_bboxes)
+ iou = paddle.diag(iou)
+
+ if reg_loss_type == 'gwd':
+ bbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx +
+ feat_anchor_num, :]
+ odm_bbox_total = self.gwd_loss(odm_bbox_decode,
+ bbox_gt_bboxes_level)
+ odm_bbox_total = odm_bbox_total * feat_bbox_weights
+ odm_bbox_total = paddle.sum(
+ odm_bbox_total) / num_total_samples
+
+ odm_bbox_losses.append(odm_bbox_total)
+ st_idx += feat_anchor_num
+
+ odm_cls_loss = paddle.add_n(odm_cls_losses)
+ odm_cls_loss_weight = paddle.to_tensor(
+ self.cls_loss_weight[1], dtype='float32', stop_gradient=True)
+ odm_cls_loss = odm_cls_loss * odm_cls_loss_weight
+ odm_reg_loss = paddle.add_n(odm_bbox_losses)
+ return odm_cls_loss, odm_reg_loss
+
+ def get_loss(self, inputs):
+ # inputs: im_id image im_shape scale_factor gt_bbox gt_class is_crowd
+
+ # compute loss
+ fam_cls_loss_lst = []
+ fam_reg_loss_lst = []
+ odm_cls_loss_lst = []
+ odm_reg_loss_lst = []
+
+ im_shape = inputs['im_shape']
+ for im_id in range(im_shape.shape[0]):
+ np_im_shape = inputs['im_shape'][im_id].numpy()
+ np_scale_factor = inputs['scale_factor'][im_id].numpy()
+ # data_format: (xc, yc, w, h, theta)
+ gt_bboxes = inputs['gt_rbox'][im_id].numpy()
+ gt_labels = inputs['gt_class'][im_id].numpy()
+ is_crowd = inputs['is_crowd'][im_id].numpy()
+ gt_labels = gt_labels + 1
+
+ # featmap_sizes
+ anchors_list_all = np.concatenate(self.base_anchors_list)
+
+ # get im_feat
+ fam_cls_feats_list = [e[im_id] for e in self.s2anet_head_out[0]]
+ fam_reg_feats_list = [e[im_id] for e in self.s2anet_head_out[1]]
+ odm_cls_feats_list = [e[im_id] for e in self.s2anet_head_out[2]]
+ odm_reg_feats_list = [e[im_id] for e in self.s2anet_head_out[3]]
+ im_s2anet_head_out = (fam_cls_feats_list, fam_reg_feats_list,
+ odm_cls_feats_list, odm_reg_feats_list)
+
+ # FAM
+ im_fam_target = self.anchor_assign(anchors_list_all, gt_bboxes,
+ gt_labels, is_crowd)
+ if im_fam_target is not None:
+ im_fam_cls_loss, im_fam_reg_loss = self.get_fam_loss(
+ im_fam_target, im_s2anet_head_out, self.reg_loss_type)
+ fam_cls_loss_lst.append(im_fam_cls_loss)
+ fam_reg_loss_lst.append(im_fam_reg_loss)
+
+ # ODM
+ np_refine_anchors_list = paddle.concat(
+ self.refine_anchor_list).numpy()
+ np_refine_anchors_list = np.concatenate(np_refine_anchors_list)
+ np_refine_anchors_list = np_refine_anchors_list.reshape(-1, 5)
+ im_odm_target = self.anchor_assign(np_refine_anchors_list,
+ gt_bboxes, gt_labels, is_crowd)
+
+ if im_odm_target is not None:
+ im_odm_cls_loss, im_odm_reg_loss = self.get_odm_loss(
+ im_odm_target, im_s2anet_head_out, self.reg_loss_type)
+ odm_cls_loss_lst.append(im_odm_cls_loss)
+ odm_reg_loss_lst.append(im_odm_reg_loss)
+ fam_cls_loss = paddle.add_n(fam_cls_loss_lst)
+ fam_reg_loss = paddle.add_n(fam_reg_loss_lst)
+ odm_cls_loss = paddle.add_n(odm_cls_loss_lst)
+ odm_reg_loss = paddle.add_n(odm_reg_loss_lst)
+ return {
+ 'fam_cls_loss': fam_cls_loss,
+ 'fam_reg_loss': fam_reg_loss,
+ 'odm_cls_loss': odm_cls_loss,
+ 'odm_reg_loss': odm_reg_loss
+ }
+
+ def get_bboxes(self, cls_score_list, bbox_pred_list, mlvl_anchors, nms_pre,
+ cls_out_channels, use_sigmoid_cls):
+ assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
+
+ mlvl_bboxes = []
+ mlvl_scores = []
+
+ for cls_score, bbox_pred, anchors in zip(cls_score_list, bbox_pred_list,
+ mlvl_anchors):
+ cls_score = paddle.reshape(cls_score, [-1, cls_out_channels])
+ if use_sigmoid_cls:
+ scores = F.sigmoid(cls_score)
+ else:
+ scores = F.softmax(cls_score, axis=-1)
+
+ # bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 5)
+ bbox_pred = paddle.transpose(bbox_pred, [1, 2, 0])
+ bbox_pred = paddle.reshape(bbox_pred, [-1, 5])
+ anchors = paddle.reshape(anchors, [-1, 5])
+
+ if scores.shape[0] > nms_pre:
+ # Get maximum scores for foreground classes.
+ if use_sigmoid_cls:
+ max_scores = paddle.max(scores, axis=1)
+ else:
+ max_scores = paddle.max(scores[:, 1:], axis=1)
+
+ topk_val, topk_inds = paddle.topk(max_scores, nms_pre)
+ anchors = paddle.gather(anchors, topk_inds)
+ bbox_pred = paddle.gather(bbox_pred, topk_inds)
+ scores = paddle.gather(scores, topk_inds)
+
+ bbox_delta = paddle.reshape(bbox_pred, [-1, 5])
+ bboxes = self.delta2rbox(anchors, bbox_delta)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+
+ mlvl_bboxes = paddle.concat(mlvl_bboxes, axis=0)
+ mlvl_scores = paddle.concat(mlvl_scores)
+
+ return mlvl_scores, mlvl_bboxes
+
+ def rect2rbox(self, bboxes):
+ """
+ :param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax)
+ :return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle)
+ """
+ bboxes = paddle.reshape(bboxes, [-1, 4])
+ num_boxes = paddle.shape(bboxes)[0]
+ x_ctr = (bboxes[:, 2] + bboxes[:, 0]) / 2.0
+ y_ctr = (bboxes[:, 3] + bboxes[:, 1]) / 2.0
+ edges1 = paddle.abs(bboxes[:, 2] - bboxes[:, 0])
+ edges2 = paddle.abs(bboxes[:, 3] - bboxes[:, 1])
+
+ rbox_w = paddle.maximum(edges1, edges2)
+ rbox_h = paddle.minimum(edges1, edges2)
+
+ # set angle
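+        # when the horizontal edge is the shorter one, use an angle of pi / 2
+        # so that rbox_w always holds the longer edge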
+ inds = edges1 < edges2
+ inds = paddle.cast(inds, 'int32')
+ rboxes_angle = inds * np.pi / 2.0
+
+ rboxes = paddle.stack(
+ (x_ctr, y_ctr, rbox_w, rbox_h, rboxes_angle), axis=1)
+ return rboxes
+
+ # deltas to rbox
+ def delta2rbox(self, rrois, deltas, wh_ratio_clip=1e-6):
+ """
+ :param rrois: (cx, cy, w, h, theta)
+ :param deltas: (dx, dy, dw, dh, dtheta)
+ :param means: means of anchor
+ :param stds: stds of anchor
+ :param wh_ratio_clip: clip threshold of wh_ratio
+ :return:
+ """
+ deltas = paddle.reshape(deltas, [-1, 5])
+ rrois = paddle.reshape(rrois, [-1, 5])
+ # fix dy2st bug denorm_deltas = deltas * self.stds + self.means
+ denorm_deltas = paddle.add(
+ paddle.multiply(deltas, self.stds), self.means)
+
+ dx = denorm_deltas[:, 0]
+ dy = denorm_deltas[:, 1]
+ dw = denorm_deltas[:, 2]
+ dh = denorm_deltas[:, 3]
+ dangle = denorm_deltas[:, 4]
+ max_ratio = np.abs(np.log(wh_ratio_clip))
+ dw = paddle.clip(dw, min=-max_ratio, max=max_ratio)
+ dh = paddle.clip(dh, min=-max_ratio, max=max_ratio)
+
+ rroi_x = rrois[:, 0]
+ rroi_y = rrois[:, 1]
+ rroi_w = rrois[:, 2]
+ rroi_h = rrois[:, 3]
+ rroi_angle = rrois[:, 4]
+
+ gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin(
+ rroi_angle) + rroi_x
+ gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos(
+ rroi_angle) + rroi_y
+ gw = rroi_w * dw.exp()
+ gh = rroi_h * dh.exp()
+ ga = np.pi * dangle + rroi_angle
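+        # wrap the decoded angle into the range [-pi / 4, 3 * pi / 4)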
+ ga = (ga + np.pi / 4) % np.pi - np.pi / 4
+ ga = paddle.to_tensor(ga)
+ gw = paddle.to_tensor(gw, dtype='float32')
+ gh = paddle.to_tensor(gh, dtype='float32')
+ bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=-1)
+ return bboxes
+
+ def bbox_decode(self, bbox_preds, anchors):
+ """decode bbox from deltas
+ Args:
+ bbox_preds: [N,H,W,5]
+ anchors: [H*W,5]
+ return:
+ bboxes: [N,H,W,5]
+ """
+ num_imgs, H, W, _ = bbox_preds.shape
+ bbox_delta = paddle.reshape(bbox_preds, [-1, 5])
+ bboxes = self.delta2rbox(anchors, bbox_delta)
+ return bboxes
+
+ def trace(self, A):
+ tr = paddle.diagonal(A, axis1=-2, axis2=-1)
+ tr = paddle.sum(tr, axis=-1)
+ return tr
+
+ def sqrt_newton_schulz_autograd(self, A, numIters):
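+        # Differentiable matrix square root via Newton-Schulz iteration:
+        # normalize A by its Frobenius norm, iterate Y <- Y(3I - ZY)/2 and
+        # Z <- (3I - ZY)Z/2, then rescale so that Y approximates sqrt(A).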
+ A_shape = A.shape
+ batchSize = A_shape[0]
+ dim = A_shape[1]
+
+ normA = A * A
+ normA = paddle.sum(normA, axis=1)
+ normA = paddle.sum(normA, axis=1)
+ normA = paddle.sqrt(normA)
+ normA1 = normA.reshape([batchSize, 1, 1])
+ Y = paddle.divide(A, paddle.expand_as(normA1, A))
+ I = paddle.eye(dim, dim).reshape([1, dim, dim])
+ l0 = []
+ for i in range(batchSize):
+ l0.append(I)
+ I = paddle.concat(l0, axis=0)
+ I.stop_gradient = False
+ Z = paddle.eye(dim, dim).reshape([1, dim, dim])
+ l1 = []
+ for i in range(batchSize):
+ l1.append(Z)
+ Z = paddle.concat(l1, axis=0)
+ Z.stop_gradient = False
+
+ for i in range(numIters):
+ T = 0.5 * (3.0 * I - Z.bmm(Y))
+ Y = Y.bmm(T)
+ Z = T.bmm(Z)
+ sA = Y * paddle.sqrt(normA1).reshape([batchSize, 1, 1])
+ sA = paddle.expand_as(sA, A)
+ return sA
+
+    def wasserstein_distance_sigma(self, sigma1, sigma2):
+ wasserstein_distance_item2 = paddle.matmul(
+ sigma1, sigma1) + paddle.matmul(
+ sigma2, sigma2) - 2 * self.sqrt_newton_schulz_autograd(
+ paddle.matmul(
+ paddle.matmul(sigma1, paddle.matmul(sigma2, sigma2)),
+ sigma1), 10)
+ wasserstein_distance_item2 = self.trace(wasserstein_distance_item2)
+
+ return wasserstein_distance_item2
+
+ def xywhr2xyrs(self, xywhr):
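+        # Decompose a rotated box (x, y, w, h, r) into the parameters of a 2-D
+        # Gaussian: center xy, rotation matrix R, and scale S = diag(w, h) / 2.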
+ xywhr = paddle.reshape(xywhr, [-1, 5])
+ xy = xywhr[:, :2]
+ wh = paddle.clip(xywhr[:, 2:4], min=1e-7, max=1e7)
+ r = xywhr[:, 4]
+ cos_r = paddle.cos(r)
+ sin_r = paddle.sin(r)
+ R = paddle.stack(
+ (cos_r, -sin_r, sin_r, cos_r), axis=-1).reshape([-1, 2, 2])
+ S = 0.5 * paddle.nn.functional.diag_embed(wh)
+ return xy, R, S
+
+ def gwd_loss(self,
+ pred,
+ target,
+ fun='log',
+ tau=1.0,
+ alpha=1.0,
+ normalize=False):
+
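+        # Gaussian Wasserstein distance: treat each rotated box as a 2-D
+        # Gaussian N(xy, R S^2 R^T); the loss combines the squared center
+        # distance with a closed-form trace term between the two covariances.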
+ xy_p, R_p, S_p = self.xywhr2xyrs(pred)
+ xy_t, R_t, S_t = self.xywhr2xyrs(target)
+
+ xy_distance = (xy_p - xy_t).square().sum(axis=-1)
+
+ Sigma_p = R_p.matmul(S_p.square()).matmul(R_p.transpose([0, 2, 1]))
+ Sigma_t = R_t.matmul(S_t.square()).matmul(R_t.transpose([0, 2, 1]))
+
+ whr_distance = paddle.diagonal(
+ S_p, axis1=-2, axis2=-1).square().sum(axis=-1)
+
+ whr_distance = whr_distance + paddle.diagonal(
+ S_t, axis1=-2, axis2=-1).square().sum(axis=-1)
+ _t = Sigma_p.matmul(Sigma_t)
+
+ _t_tr = paddle.diagonal(_t, axis1=-2, axis2=-1).sum(axis=-1)
+ _t_det_sqrt = paddle.diagonal(S_p, axis1=-2, axis2=-1).prod(axis=-1)
+ _t_det_sqrt = _t_det_sqrt * paddle.diagonal(
+ S_t, axis1=-2, axis2=-1).prod(axis=-1)
+ whr_distance = whr_distance + (-2) * (
+ (_t_tr + 2 * _t_det_sqrt).clip(0).sqrt())
+
+ distance = (xy_distance + alpha * alpha * whr_distance).clip(0)
+
+ if normalize:
+ wh_p = pred[..., 2:4].clip(min=1e-7, max=1e7)
+ wh_t = target[..., 2:4].clip(min=1e-7, max=1e7)
+            scale = ((wh_p.log() + wh_t.log()).sum(axis=-1) / 4).exp()
+ distance = distance / scale
+
+ if fun == 'log':
+ distance = paddle.log1p(distance)
+
+ if tau >= 1.0:
+ return 1 - 1 / (tau + distance)
+
+ return distance
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/simota_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/simota_head.py
new file mode 100644
index 000000000..a1485f390
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/simota_head.py
@@ -0,0 +1,498 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The code is based on:
+# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/yolox_head.py
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+from functools import partial
+import numpy as np
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import Normal, Constant
+
+from ppdet.core.workspace import register
+
+from ppdet.modeling.bbox_utils import distance2bbox, bbox2distance
+from ppdet.data.transform.atss_assigner import bbox_overlaps
+
+from .gfl_head import GFLHead
+
+
+@register
+class OTAHead(GFLHead):
+ """
+ OTAHead
+ Args:
+ conv_feat (object): Instance of 'FCOSFeat'
+ num_classes (int): Number of classes
+ fpn_stride (list): The stride of each FPN Layer
+ prior_prob (float): Used to set the bias init for the class prediction layer
+        loss_class (object): Instance of QualityFocalLoss.
+ loss_dfl (object): Instance of DistributionFocalLoss.
+ loss_bbox (object): Instance of bbox loss.
+ assigner (object): Instance of label assigner.
+        reg_max (int): Max value of the integral set :math:`{0, ..., reg_max}`
+            in QFL setting. Default: 16.
+ """
+ __inject__ = [
+ 'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',
+ 'assigner', 'nms'
+ ]
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ conv_feat='FCOSFeat',
+ dgqp_module=None,
+ num_classes=80,
+ fpn_stride=[8, 16, 32, 64, 128],
+ prior_prob=0.01,
+ loss_class='QualityFocalLoss',
+ loss_dfl='DistributionFocalLoss',
+ loss_bbox='GIoULoss',
+ assigner='SimOTAAssigner',
+ reg_max=16,
+ feat_in_chan=256,
+ nms=None,
+ nms_pre=1000,
+ cell_offset=0):
+ super(OTAHead, self).__init__(
+ conv_feat=conv_feat,
+ dgqp_module=dgqp_module,
+ num_classes=num_classes,
+ fpn_stride=fpn_stride,
+ prior_prob=prior_prob,
+ loss_class=loss_class,
+ loss_dfl=loss_dfl,
+ loss_bbox=loss_bbox,
+ reg_max=reg_max,
+ feat_in_chan=feat_in_chan,
+ nms=nms,
+ nms_pre=nms_pre,
+ cell_offset=cell_offset)
+ self.conv_feat = conv_feat
+ self.dgqp_module = dgqp_module
+ self.num_classes = num_classes
+ self.fpn_stride = fpn_stride
+ self.prior_prob = prior_prob
+ self.loss_qfl = loss_class
+ self.loss_dfl = loss_dfl
+ self.loss_bbox = loss_bbox
+ self.reg_max = reg_max
+ self.feat_in_chan = feat_in_chan
+ self.nms = nms
+ self.nms_pre = nms_pre
+ self.cell_offset = cell_offset
+ self.use_sigmoid = self.loss_qfl.use_sigmoid
+
+ self.assigner = assigner
+
+ def _get_target_single(self, flatten_cls_pred, flatten_center_and_stride,
+ flatten_bbox, gt_bboxes, gt_labels):
+ """Compute targets for priors in a single image.
+ """
+ pos_num, label, label_weight, bbox_target = self.assigner(
+ F.sigmoid(flatten_cls_pred), flatten_center_and_stride,
+ flatten_bbox, gt_bboxes, gt_labels)
+
+ return (pos_num, label, label_weight, bbox_target)
+
+ def get_loss(self, head_outs, gt_meta):
+ cls_scores, bbox_preds = head_outs
+ num_level_anchors = [
+ featmap.shape[-2] * featmap.shape[-1] for featmap in cls_scores
+ ]
+ num_imgs = gt_meta['im_id'].shape[0]
+ featmap_sizes = [[featmap.shape[-2], featmap.shape[-1]]
+ for featmap in cls_scores]
+
+ decode_bbox_preds = []
+ center_and_strides = []
+ for featmap_size, stride, bbox_pred in zip(featmap_sizes,
+ self.fpn_stride, bbox_preds):
+
+ # center in origin image
+ yy, xx = self.get_single_level_center_point(featmap_size, stride,
+ self.cell_offset)
+
+ center_and_stride = paddle.stack([xx, yy, stride, stride], -1).tile(
+ [num_imgs, 1, 1])
+ center_and_strides.append(center_and_stride)
+ center_in_feature = center_and_stride.reshape(
+ [-1, 4])[:, :-2] / stride
+ bbox_pred = bbox_pred.transpose([0, 2, 3, 1]).reshape(
+ [num_imgs, -1, 4 * (self.reg_max + 1)])
+ pred_distances = self.distribution_project(bbox_pred)
+ decode_bbox_pred_wo_stride = distance2bbox(
+ center_in_feature, pred_distances).reshape([num_imgs, -1, 4])
+ decode_bbox_preds.append(decode_bbox_pred_wo_stride * stride)
+
+ flatten_cls_preds = [
+ cls_pred.transpose([0, 2, 3, 1]).reshape(
+ [num_imgs, -1, self.cls_out_channels])
+ for cls_pred in cls_scores
+ ]
+ flatten_cls_preds = paddle.concat(flatten_cls_preds, axis=1)
+ flatten_bboxes = paddle.concat(decode_bbox_preds, axis=1)
+ flatten_center_and_strides = paddle.concat(center_and_strides, axis=1)
+
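+        # run the SimOTA assigner image by image on detached predictions to
+        # obtain per-prior labels, label weights, and bbox targets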
+ gt_boxes, gt_labels = gt_meta['gt_bbox'], gt_meta['gt_class']
+ pos_num_l, label_l, label_weight_l, bbox_target_l = [], [], [], []
+        for flatten_cls_pred, flatten_center_and_stride, flatten_bbox, gt_box, gt_label \
+                in zip(flatten_cls_preds.detach(), flatten_center_and_strides.detach(),
+                       flatten_bboxes.detach(), gt_boxes, gt_labels):
+ pos_num, label, label_weight, bbox_target = self._get_target_single(
+ flatten_cls_pred, flatten_center_and_stride, flatten_bbox,
+ gt_box, gt_label)
+ pos_num_l.append(pos_num)
+ label_l.append(label)
+ label_weight_l.append(label_weight)
+ bbox_target_l.append(bbox_target)
+
+ labels = paddle.to_tensor(np.stack(label_l, axis=0))
+ label_weights = paddle.to_tensor(np.stack(label_weight_l, axis=0))
+ bbox_targets = paddle.to_tensor(np.stack(bbox_target_l, axis=0))
+
+ center_and_strides_list = self._images_to_levels(
+ flatten_center_and_strides, num_level_anchors)
+ labels_list = self._images_to_levels(labels, num_level_anchors)
+ label_weights_list = self._images_to_levels(label_weights,
+ num_level_anchors)
+ bbox_targets_list = self._images_to_levels(bbox_targets,
+ num_level_anchors)
+ num_total_pos = sum(pos_num_l)
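+        # average the positive-prior count across cards; fall back to
+        # max(count, 1) when distributed communication is unavailable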
+ try:
+ num_total_pos = paddle.distributed.all_reduce(num_total_pos.clone(
+ )) / paddle.distributed.get_world_size()
+ except:
+ num_total_pos = max(num_total_pos, 1)
+
+ loss_bbox_list, loss_dfl_list, loss_qfl_list, avg_factor = [], [], [], []
+ for cls_score, bbox_pred, center_and_strides, labels, label_weights, bbox_targets, stride in zip(
+ cls_scores, bbox_preds, center_and_strides_list, labels_list,
+ label_weights_list, bbox_targets_list, self.fpn_stride):
+ center_and_strides = center_and_strides.reshape([-1, 4])
+ cls_score = cls_score.transpose([0, 2, 3, 1]).reshape(
+ [-1, self.cls_out_channels])
+ bbox_pred = bbox_pred.transpose([0, 2, 3, 1]).reshape(
+ [-1, 4 * (self.reg_max + 1)])
+ bbox_targets = bbox_targets.reshape([-1, 4])
+ labels = labels.reshape([-1])
+ label_weights = label_weights.reshape([-1])
+
+ bg_class_ind = self.num_classes
+ pos_inds = paddle.nonzero(
+ paddle.logical_and((labels >= 0), (labels < bg_class_ind)),
+ as_tuple=False).squeeze(1)
+ score = np.zeros(labels.shape)
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = paddle.gather(bbox_targets, pos_inds, axis=0)
+ pos_bbox_pred = paddle.gather(bbox_pred, pos_inds, axis=0)
+ pos_centers = paddle.gather(
+ center_and_strides[:, :-2], pos_inds, axis=0) / stride
+
+ weight_targets = F.sigmoid(cls_score.detach())
+ weight_targets = paddle.gather(
+ weight_targets.max(axis=1, keepdim=True), pos_inds, axis=0)
+ pos_bbox_pred_corners = self.distribution_project(pos_bbox_pred)
+ pos_decode_bbox_pred = distance2bbox(pos_centers,
+ pos_bbox_pred_corners)
+ pos_decode_bbox_targets = pos_bbox_targets / stride
+ bbox_iou = bbox_overlaps(
+ pos_decode_bbox_pred.detach().numpy(),
+ pos_decode_bbox_targets.detach().numpy(),
+ is_aligned=True)
+ score[pos_inds.numpy()] = bbox_iou
+
+ pred_corners = pos_bbox_pred.reshape([-1, self.reg_max + 1])
+ target_corners = bbox2distance(pos_centers,
+ pos_decode_bbox_targets,
+ self.reg_max).reshape([-1])
+ # regression loss
+ loss_bbox = paddle.sum(
+ self.loss_bbox(pos_decode_bbox_pred,
+ pos_decode_bbox_targets) * weight_targets)
+
+ # dfl loss
+ loss_dfl = self.loss_dfl(
+ pred_corners,
+ target_corners,
+ weight=weight_targets.expand([-1, 4]).reshape([-1]),
+ avg_factor=4.0)
+ else:
+ loss_bbox = bbox_pred.sum() * 0
+ loss_dfl = bbox_pred.sum() * 0
+ weight_targets = paddle.to_tensor([0], dtype='float32')
+
+ # qfl loss
+ score = paddle.to_tensor(score)
+ loss_qfl = self.loss_qfl(
+ cls_score, (labels, score),
+ weight=label_weights,
+ avg_factor=num_total_pos)
+ loss_bbox_list.append(loss_bbox)
+ loss_dfl_list.append(loss_dfl)
+ loss_qfl_list.append(loss_qfl)
+ avg_factor.append(weight_targets.sum())
+
+ avg_factor = sum(avg_factor)
+ try:
+ avg_factor = paddle.distributed.all_reduce(avg_factor.clone())
+ avg_factor = paddle.clip(
+ avg_factor / paddle.distributed.get_world_size(), min=1)
+ except:
+ avg_factor = max(avg_factor.item(), 1)
+ if avg_factor <= 0:
+ loss_qfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
+ loss_bbox = paddle.to_tensor(
+ 0, dtype='float32', stop_gradient=False)
+ loss_dfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
+ else:
+ losses_bbox = list(map(lambda x: x / avg_factor, loss_bbox_list))
+ losses_dfl = list(map(lambda x: x / avg_factor, loss_dfl_list))
+ loss_qfl = sum(loss_qfl_list)
+ loss_bbox = sum(losses_bbox)
+ loss_dfl = sum(losses_dfl)
+
+ loss_states = dict(
+ loss_qfl=loss_qfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)
+
+ return loss_states
+
+
+@register
+class OTAVFLHead(OTAHead):
+ __inject__ = [
+ 'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',
+ 'assigner', 'nms'
+ ]
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ conv_feat='FCOSFeat',
+ dgqp_module=None,
+ num_classes=80,
+ fpn_stride=[8, 16, 32, 64, 128],
+ prior_prob=0.01,
+ loss_class='VarifocalLoss',
+ loss_dfl='DistributionFocalLoss',
+ loss_bbox='GIoULoss',
+ assigner='SimOTAAssigner',
+ reg_max=16,
+ feat_in_chan=256,
+ nms=None,
+ nms_pre=1000,
+ cell_offset=0):
+ super(OTAVFLHead, self).__init__(
+ conv_feat=conv_feat,
+ dgqp_module=dgqp_module,
+ num_classes=num_classes,
+ fpn_stride=fpn_stride,
+ prior_prob=prior_prob,
+ loss_class=loss_class,
+ loss_dfl=loss_dfl,
+ loss_bbox=loss_bbox,
+ reg_max=reg_max,
+ feat_in_chan=feat_in_chan,
+ nms=nms,
+ nms_pre=nms_pre,
+ cell_offset=cell_offset)
+ self.conv_feat = conv_feat
+ self.dgqp_module = dgqp_module
+ self.num_classes = num_classes
+ self.fpn_stride = fpn_stride
+ self.prior_prob = prior_prob
+ self.loss_vfl = loss_class
+ self.loss_dfl = loss_dfl
+ self.loss_bbox = loss_bbox
+ self.reg_max = reg_max
+ self.feat_in_chan = feat_in_chan
+ self.nms = nms
+ self.nms_pre = nms_pre
+ self.cell_offset = cell_offset
+ self.use_sigmoid = self.loss_vfl.use_sigmoid
+
+ self.assigner = assigner
+
+ def get_loss(self, head_outs, gt_meta):
+ cls_scores, bbox_preds = head_outs
+ num_level_anchors = [
+ featmap.shape[-2] * featmap.shape[-1] for featmap in cls_scores
+ ]
+ num_imgs = gt_meta['im_id'].shape[0]
+ featmap_sizes = [[featmap.shape[-2], featmap.shape[-1]]
+ for featmap in cls_scores]
+
+ decode_bbox_preds = []
+ center_and_strides = []
+ for featmap_size, stride, bbox_pred in zip(featmap_sizes,
+ self.fpn_stride, bbox_preds):
+ # center in origin image
+ yy, xx = self.get_single_level_center_point(featmap_size, stride,
+ self.cell_offset)
+ strides = paddle.full((len(xx), ), stride)
+ center_and_stride = paddle.stack([xx, yy, strides, strides],
+ -1).tile([num_imgs, 1, 1])
+ center_and_strides.append(center_and_stride)
+ center_in_feature = center_and_stride.reshape(
+ [-1, 4])[:, :-2] / stride
+ bbox_pred = bbox_pred.transpose([0, 2, 3, 1]).reshape(
+ [num_imgs, -1, 4 * (self.reg_max + 1)])
+ pred_distances = self.distribution_project(bbox_pred)
+ decode_bbox_pred_wo_stride = distance2bbox(
+ center_in_feature, pred_distances).reshape([num_imgs, -1, 4])
+ decode_bbox_preds.append(decode_bbox_pred_wo_stride * stride)
+
+ flatten_cls_preds = [
+ cls_pred.transpose([0, 2, 3, 1]).reshape(
+ [num_imgs, -1, self.cls_out_channels])
+ for cls_pred in cls_scores
+ ]
+ flatten_cls_preds = paddle.concat(flatten_cls_preds, axis=1)
+ flatten_bboxes = paddle.concat(decode_bbox_preds, axis=1)
+ flatten_center_and_strides = paddle.concat(center_and_strides, axis=1)
+
+ gt_boxes, gt_labels = gt_meta['gt_bbox'], gt_meta['gt_class']
+ pos_num_l, label_l, label_weight_l, bbox_target_l = [], [], [], []
+        for flatten_cls_pred, flatten_center_and_stride, flatten_bbox, gt_box, gt_label \
+                in zip(flatten_cls_preds.detach(), flatten_center_and_strides.detach(),
+                       flatten_bboxes.detach(), gt_boxes, gt_labels):
+ pos_num, label, label_weight, bbox_target = self._get_target_single(
+ flatten_cls_pred, flatten_center_and_stride, flatten_bbox,
+ gt_box, gt_label)
+ pos_num_l.append(pos_num)
+ label_l.append(label)
+ label_weight_l.append(label_weight)
+ bbox_target_l.append(bbox_target)
+
+ labels = paddle.to_tensor(np.stack(label_l, axis=0))
+ label_weights = paddle.to_tensor(np.stack(label_weight_l, axis=0))
+ bbox_targets = paddle.to_tensor(np.stack(bbox_target_l, axis=0))
+
+ center_and_strides_list = self._images_to_levels(
+ flatten_center_and_strides, num_level_anchors)
+ labels_list = self._images_to_levels(labels, num_level_anchors)
+ label_weights_list = self._images_to_levels(label_weights,
+ num_level_anchors)
+ bbox_targets_list = self._images_to_levels(bbox_targets,
+ num_level_anchors)
+ num_total_pos = sum(pos_num_l)
+ try:
+ num_total_pos = paddle.distributed.all_reduce(num_total_pos.clone(
+ )) / paddle.distributed.get_world_size()
+        except Exception:
+ num_total_pos = max(num_total_pos, 1)
+
+ loss_bbox_list, loss_dfl_list, loss_vfl_list, avg_factor = [], [], [], []
+ for cls_score, bbox_pred, center_and_strides, labels, label_weights, bbox_targets, stride in zip(
+ cls_scores, bbox_preds, center_and_strides_list, labels_list,
+ label_weights_list, bbox_targets_list, self.fpn_stride):
+ center_and_strides = center_and_strides.reshape([-1, 4])
+ cls_score = cls_score.transpose([0, 2, 3, 1]).reshape(
+ [-1, self.cls_out_channels])
+ bbox_pred = bbox_pred.transpose([0, 2, 3, 1]).reshape(
+ [-1, 4 * (self.reg_max + 1)])
+ bbox_targets = bbox_targets.reshape([-1, 4])
+ labels = labels.reshape([-1])
+
+ bg_class_ind = self.num_classes
+ pos_inds = paddle.nonzero(
+ paddle.logical_and((labels >= 0), (labels < bg_class_ind)),
+ as_tuple=False).squeeze(1)
+ # vfl
+ vfl_score = np.zeros(cls_score.shape)
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = paddle.gather(bbox_targets, pos_inds, axis=0)
+ pos_bbox_pred = paddle.gather(bbox_pred, pos_inds, axis=0)
+ pos_centers = paddle.gather(
+ center_and_strides[:, :-2], pos_inds, axis=0) / stride
+
+ weight_targets = F.sigmoid(cls_score.detach())
+ weight_targets = paddle.gather(
+ weight_targets.max(axis=1, keepdim=True), pos_inds, axis=0)
+ pos_bbox_pred_corners = self.distribution_project(pos_bbox_pred)
+ pos_decode_bbox_pred = distance2bbox(pos_centers,
+ pos_bbox_pred_corners)
+ pos_decode_bbox_targets = pos_bbox_targets / stride
+ bbox_iou = bbox_overlaps(
+ pos_decode_bbox_pred.detach().numpy(),
+ pos_decode_bbox_targets.detach().numpy(),
+ is_aligned=True)
+
+ # vfl
+ pos_labels = paddle.gather(labels, pos_inds, axis=0)
+ vfl_score[pos_inds.numpy(), pos_labels] = bbox_iou
+
+ pred_corners = pos_bbox_pred.reshape([-1, self.reg_max + 1])
+ target_corners = bbox2distance(pos_centers,
+ pos_decode_bbox_targets,
+ self.reg_max).reshape([-1])
+ # regression loss
+ loss_bbox = paddle.sum(
+ self.loss_bbox(pos_decode_bbox_pred,
+ pos_decode_bbox_targets) * weight_targets)
+
+ # dfl loss
+ loss_dfl = self.loss_dfl(
+ pred_corners,
+ target_corners,
+ weight=weight_targets.expand([-1, 4]).reshape([-1]),
+ avg_factor=4.0)
+ else:
+ loss_bbox = bbox_pred.sum() * 0
+ loss_dfl = bbox_pred.sum() * 0
+ weight_targets = paddle.to_tensor([0], dtype='float32')
+
+ # vfl loss
+ num_pos_avg_per_gpu = num_total_pos
+ vfl_score = paddle.to_tensor(vfl_score)
+ loss_vfl = self.loss_vfl(
+ cls_score, vfl_score, avg_factor=num_pos_avg_per_gpu)
+
+ loss_bbox_list.append(loss_bbox)
+ loss_dfl_list.append(loss_dfl)
+ loss_vfl_list.append(loss_vfl)
+ avg_factor.append(weight_targets.sum())
+
+ avg_factor = sum(avg_factor)
+ try:
+ avg_factor = paddle.distributed.all_reduce(avg_factor.clone())
+ avg_factor = paddle.clip(
+ avg_factor / paddle.distributed.get_world_size(), min=1)
+        except Exception:
+ avg_factor = max(avg_factor.item(), 1)
+ if avg_factor <= 0:
+ loss_vfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
+ loss_bbox = paddle.to_tensor(
+ 0, dtype='float32', stop_gradient=False)
+ loss_dfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
+ else:
+ losses_bbox = list(map(lambda x: x / avg_factor, loss_bbox_list))
+ losses_dfl = list(map(lambda x: x / avg_factor, loss_dfl_list))
+ loss_vfl = sum(loss_vfl_list)
+ loss_bbox = sum(losses_bbox)
+ loss_dfl = sum(losses_dfl)
+
+ loss_states = dict(
+ loss_vfl=loss_vfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)
+
+ return loss_states
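+
+
+# Illustrative sketch (not part of the original file): the distributed
+# loss-normalization pattern used by both get_loss methods above, factored
+# out on its own. It assumes `avg_factor` is a 0-D float32 Tensor and
+# falls back to single-process behaviour when paddle.distributed is not
+# initialized.
+def _normalize_losses_sketch(loss_list, avg_factor):
+    try:
+        # Sum the positive-sample weights over all GPUs, then average.
+        paddle.distributed.all_reduce(avg_factor)
+        avg_factor = paddle.clip(
+            avg_factor / paddle.distributed.get_world_size(), min=1)
+    except Exception:
+        # Single-process fallback, mirroring the except branches above.
+        avg_factor = max(float(avg_factor), 1)
+    return [loss / avg_factor for loss in loss_list]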
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/solov2_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/solov2_head.py
new file mode 100644
index 000000000..6989abb3a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/solov2_head.py
@@ -0,0 +1,554 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import Normal, Constant
+
+from ppdet.modeling.layers import ConvNormLayer, MaskMatrixNMS, DropBlock
+from ppdet.core.workspace import register
+
+from six.moves import zip
+import numpy as np
+
+__all__ = ['SOLOv2Head']
+
+
+@register
+class SOLOv2MaskHead(nn.Layer):
+ """
+ MaskHead of SOLOv2.
+    The code of this class is based on:
+ https://github.com/WXinlong/SOLO/blob/master/mmdet/models/mask_heads/mask_feat_head.py
+
+ Args:
+ in_channels (int): The channel number of input Tensor.
+ out_channels (int): The channel number of output Tensor.
+ start_level (int): The position where the input starts.
+ end_level (int): The position where the input ends.
+ use_dcn_in_tower (bool): Whether to use dcn in tower or not.
+ """
+ __shared__ = ['norm_type']
+
+ def __init__(self,
+ in_channels=256,
+ mid_channels=128,
+ out_channels=256,
+ start_level=0,
+ end_level=3,
+ use_dcn_in_tower=False,
+ norm_type='gn'):
+ super(SOLOv2MaskHead, self).__init__()
+ assert start_level >= 0 and end_level >= start_level
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.mid_channels = mid_channels
+ self.use_dcn_in_tower = use_dcn_in_tower
+ self.range_level = end_level - start_level + 1
+        self.use_dcn = self.use_dcn_in_tower
+ self.convs_all_levels = []
+ self.norm_type = norm_type
+ for i in range(start_level, end_level + 1):
+ conv_feat_name = 'mask_feat_head.convs_all_levels.{}'.format(i)
+ conv_pre_feat = nn.Sequential()
+ if i == start_level:
+ conv_pre_feat.add_sublayer(
+ conv_feat_name + '.conv' + str(i),
+ ConvNormLayer(
+ ch_in=self.in_channels,
+ ch_out=self.mid_channels,
+ filter_size=3,
+ stride=1,
+ use_dcn=self.use_dcn,
+ norm_type=self.norm_type))
+ self.add_sublayer('conv_pre_feat' + str(i), conv_pre_feat)
+ self.convs_all_levels.append(conv_pre_feat)
+ else:
+ for j in range(i):
+                    if j == 0:
+                        ch_in = self.in_channels + 2 if i == end_level else self.in_channels
+                    else:
+                        ch_in = self.mid_channels
+ conv_pre_feat.add_sublayer(
+ conv_feat_name + '.conv' + str(j),
+ ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=self.mid_channels,
+ filter_size=3,
+ stride=1,
+ use_dcn=self.use_dcn,
+ norm_type=self.norm_type))
+ conv_pre_feat.add_sublayer(
+ conv_feat_name + '.conv' + str(j) + 'act', nn.ReLU())
+ conv_pre_feat.add_sublayer(
+ 'upsample' + str(i) + str(j),
+ nn.Upsample(
+ scale_factor=2, mode='bilinear'))
+ self.add_sublayer('conv_pre_feat' + str(i), conv_pre_feat)
+ self.convs_all_levels.append(conv_pre_feat)
+
+ conv_pred_name = 'mask_feat_head.conv_pred.0'
+ self.conv_pred = self.add_sublayer(
+ conv_pred_name,
+ ConvNormLayer(
+ ch_in=self.mid_channels,
+ ch_out=self.out_channels,
+ filter_size=1,
+ stride=1,
+ use_dcn=self.use_dcn,
+ norm_type=self.norm_type))
+
+ def forward(self, inputs):
+ """
+ Get SOLOv2MaskHead output.
+
+ Args:
+            inputs (list[Tensor]): feature maps from the neck, each with shape [N, C, H, W]
+ Returns:
+ ins_pred(Tensor): Output of SOLOv2MaskHead head
+ """
+ feat_all_level = F.relu(self.convs_all_levels[0](inputs[0]))
+ for i in range(1, self.range_level):
+ input_p = inputs[i]
+ if i == (self.range_level - 1):
+ input_feat = input_p
+ x_range = paddle.linspace(
+ -1, 1, paddle.shape(input_feat)[-1], dtype='float32')
+ y_range = paddle.linspace(
+ -1, 1, paddle.shape(input_feat)[-2], dtype='float32')
+ y, x = paddle.meshgrid([y_range, x_range])
+ x = paddle.unsqueeze(x, [0, 1])
+ y = paddle.unsqueeze(y, [0, 1])
+ y = paddle.expand(
+ y, shape=[paddle.shape(input_feat)[0], 1, -1, -1])
+ x = paddle.expand(
+ x, shape=[paddle.shape(input_feat)[0], 1, -1, -1])
+ coord_feat = paddle.concat([x, y], axis=1)
+ input_p = paddle.concat([input_p, coord_feat], axis=1)
+ feat_all_level = paddle.add(feat_all_level,
+ self.convs_all_levels[i](input_p))
+ ins_pred = F.relu(self.conv_pred(feat_all_level))
+
+ return ins_pred
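+
+
+# Illustrative sketch (not part of the original file): the CoordConv trick
+# used in forward() above, on its own. Two extra channels holding the
+# normalized x/y coordinate of every pixel are concatenated to the feature
+# map so that the following convolutions become position-aware.
+def _append_coord_channels_sketch(feat):
+    # feat: [N, C, H, W] -> returns [N, C + 2, H, W]
+    x_range = paddle.linspace(-1, 1, paddle.shape(feat)[-1], dtype='float32')
+    y_range = paddle.linspace(-1, 1, paddle.shape(feat)[-2], dtype='float32')
+    y, x = paddle.meshgrid([y_range, x_range])
+    x = paddle.unsqueeze(x, [0, 1])
+    y = paddle.unsqueeze(y, [0, 1])
+    x = paddle.expand(x, shape=[paddle.shape(feat)[0], 1, -1, -1])
+    y = paddle.expand(y, shape=[paddle.shape(feat)[0], 1, -1, -1])
+    return paddle.concat([feat, x, y], axis=1)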
+
+
+@register
+class SOLOv2Head(nn.Layer):
+ """
+ Head block for SOLOv2 network
+
+ Args:
+ num_classes (int): Number of output classes.
+ in_channels (int): Number of input channels.
+        seg_feat_channels (int): Number of filters in the kernel & category branch convolutions.
+        stacked_convs (int): Number of stacked convolution layers.
+        num_grids (list[int]): List of feature map grid sizes.
+        kernel_out_channels (int): Number of output channels in kernel branch.
+        dcn_v2_stages (list): Which stages use dcn v2 in the tower, in [0, stacked_convs).
+        segm_strides (list[int]): List of segmentation area strides.
+        solov2_loss (object): SOLOv2Loss instance.
+        score_threshold (float): Threshold of category score.
+ mask_nms (object): MaskMatrixNMS instance.
+ """
+ __inject__ = ['solov2_loss', 'mask_nms']
+ __shared__ = ['norm_type', 'num_classes']
+
+ def __init__(self,
+ num_classes=80,
+ in_channels=256,
+ seg_feat_channels=256,
+ stacked_convs=4,
+ num_grids=[40, 36, 24, 16, 12],
+ kernel_out_channels=256,
+ dcn_v2_stages=[],
+ segm_strides=[8, 8, 16, 32, 32],
+ solov2_loss=None,
+ score_threshold=0.1,
+ mask_threshold=0.5,
+ mask_nms=None,
+ norm_type='gn',
+ drop_block=False):
+ super(SOLOv2Head, self).__init__()
+ self.num_classes = num_classes
+ self.in_channels = in_channels
+ self.seg_num_grids = num_grids
+ self.cate_out_channels = self.num_classes
+ self.seg_feat_channels = seg_feat_channels
+ self.stacked_convs = stacked_convs
+ self.kernel_out_channels = kernel_out_channels
+ self.dcn_v2_stages = dcn_v2_stages
+ self.segm_strides = segm_strides
+ self.solov2_loss = solov2_loss
+ self.mask_nms = mask_nms
+ self.score_threshold = score_threshold
+ self.mask_threshold = mask_threshold
+ self.norm_type = norm_type
+ self.drop_block = drop_block
+
+ self.kernel_pred_convs = []
+ self.cate_pred_convs = []
+ for i in range(self.stacked_convs):
+            use_dcn = i in self.dcn_v2_stages
+ ch_in = self.in_channels + 2 if i == 0 else self.seg_feat_channels
+ kernel_conv = self.add_sublayer(
+ 'bbox_head.kernel_convs.' + str(i),
+ ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=self.seg_feat_channels,
+ filter_size=3,
+ stride=1,
+ use_dcn=use_dcn,
+ norm_type=self.norm_type))
+ self.kernel_pred_convs.append(kernel_conv)
+ ch_in = self.in_channels if i == 0 else self.seg_feat_channels
+ cate_conv = self.add_sublayer(
+ 'bbox_head.cate_convs.' + str(i),
+ ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=self.seg_feat_channels,
+ filter_size=3,
+ stride=1,
+ use_dcn=use_dcn,
+ norm_type=self.norm_type))
+ self.cate_pred_convs.append(cate_conv)
+
+ self.solo_kernel = self.add_sublayer(
+ 'bbox_head.solo_kernel',
+ nn.Conv2D(
+ self.seg_feat_channels,
+ self.kernel_out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=True))
+ self.solo_cate = self.add_sublayer(
+ 'bbox_head.solo_cate',
+ nn.Conv2D(
+ self.seg_feat_channels,
+ self.cate_out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.01)),
+ bias_attr=ParamAttr(initializer=Constant(
+ value=float(-np.log((1 - 0.01) / 0.01))))))
+
+ if self.drop_block and self.training:
+ self.drop_block_fun = DropBlock(
+ block_size=3, keep_prob=0.9, name='solo_cate.dropblock')
+
+ def _points_nms(self, heat, kernel_size=2):
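+        # Keep only grid cells that are local maxima of the heatmap: the
+        # 2x2 max-pool (stride 1, padding 1) gives each cell its
+        # neighborhood maximum, and only cells equal to it survive.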
+ hmax = F.max_pool2d(heat, kernel_size=kernel_size, stride=1, padding=1)
+ keep = paddle.cast((hmax[:, :, :-1, :-1] == heat), 'float32')
+ return heat * keep
+
+ def _split_feats(self, feats):
+ return (F.interpolate(
+ feats[0],
+ scale_factor=0.5,
+ align_corners=False,
+ align_mode=0,
+ mode='bilinear'), feats[1], feats[2], feats[3], F.interpolate(
+ feats[4],
+ size=paddle.shape(feats[3])[-2:],
+ mode='bilinear',
+ align_corners=False,
+ align_mode=0))
+
+ def forward(self, input):
+ """
+ Get SOLOv2 head output
+
+ Args:
+ input (list): List of Tensors, output of backbone or neck stages
+ Returns:
+ cate_pred_list (list): Tensors of each category branch layer
+ kernel_pred_list (list): Tensors of each kernel branch layer
+ """
+ feats = self._split_feats(input)
+ cate_pred_list = []
+ kernel_pred_list = []
+ for idx in range(len(self.seg_num_grids)):
+ cate_pred, kernel_pred = self._get_output_single(feats[idx], idx)
+ cate_pred_list.append(cate_pred)
+ kernel_pred_list.append(kernel_pred)
+
+ return cate_pred_list, kernel_pred_list
+
+ def _get_output_single(self, input, idx):
+ ins_kernel_feat = input
+ # CoordConv
+ x_range = paddle.linspace(
+ -1, 1, paddle.shape(ins_kernel_feat)[-1], dtype='float32')
+ y_range = paddle.linspace(
+ -1, 1, paddle.shape(ins_kernel_feat)[-2], dtype='float32')
+ y, x = paddle.meshgrid([y_range, x_range])
+ x = paddle.unsqueeze(x, [0, 1])
+ y = paddle.unsqueeze(y, [0, 1])
+ y = paddle.expand(
+ y, shape=[paddle.shape(ins_kernel_feat)[0], 1, -1, -1])
+ x = paddle.expand(
+ x, shape=[paddle.shape(ins_kernel_feat)[0], 1, -1, -1])
+ coord_feat = paddle.concat([x, y], axis=1)
+ ins_kernel_feat = paddle.concat([ins_kernel_feat, coord_feat], axis=1)
+
+ # kernel branch
+ kernel_feat = ins_kernel_feat
+ seg_num_grid = self.seg_num_grids[idx]
+ kernel_feat = F.interpolate(
+ kernel_feat,
+ size=[seg_num_grid, seg_num_grid],
+ mode='bilinear',
+ align_corners=False,
+ align_mode=0)
+ cate_feat = kernel_feat[:, :-2, :, :]
+
+ for kernel_layer in self.kernel_pred_convs:
+ kernel_feat = F.relu(kernel_layer(kernel_feat))
+ if self.drop_block and self.training:
+ kernel_feat = self.drop_block_fun(kernel_feat)
+ kernel_pred = self.solo_kernel(kernel_feat)
+ # cate branch
+ for cate_layer in self.cate_pred_convs:
+ cate_feat = F.relu(cate_layer(cate_feat))
+ if self.drop_block and self.training:
+ cate_feat = self.drop_block_fun(cate_feat)
+ cate_pred = self.solo_cate(cate_feat)
+
+ if not self.training:
+ cate_pred = self._points_nms(F.sigmoid(cate_pred), kernel_size=2)
+ cate_pred = paddle.transpose(cate_pred, [0, 2, 3, 1])
+ return cate_pred, kernel_pred
+
+ def get_loss(self, cate_preds, kernel_preds, ins_pred, ins_labels,
+ cate_labels, grid_order_list, fg_num):
+ """
+ Get loss of network of SOLOv2.
+
+ Args:
+            cate_preds (list): Tensor list of category branch output.
+            kernel_preds (list): Tensor list of kernel branch output.
+            ins_pred (list): Tensor list of instance branch output.
+            ins_labels (list): List of instance labels per batch.
+            cate_labels (list): List of category labels per batch.
+            grid_order_list (list): List of indices per grid.
+ fg_num (int): Number of positive samples in a mini-batch.
+ Returns:
+ loss_ins (Tensor): The instance loss Tensor of SOLOv2 network.
+ loss_cate (Tensor): The category loss Tensor of SOLOv2 network.
+ """
+ batch_size = paddle.shape(grid_order_list[0])[0]
+ ins_pred_list = []
+ for kernel_preds_level, grid_orders_level in zip(kernel_preds,
+ grid_order_list):
+ if grid_orders_level.shape[1] == 0:
+ ins_pred_list.append(None)
+ continue
+ grid_orders_level = paddle.reshape(grid_orders_level, [-1])
+ reshape_pred = paddle.reshape(
+ kernel_preds_level,
+ shape=(paddle.shape(kernel_preds_level)[0],
+ paddle.shape(kernel_preds_level)[1], -1))
+ reshape_pred = paddle.transpose(reshape_pred, [0, 2, 1])
+ reshape_pred = paddle.reshape(
+ reshape_pred, shape=(-1, paddle.shape(reshape_pred)[2]))
+ gathered_pred = paddle.gather(reshape_pred, index=grid_orders_level)
+ gathered_pred = paddle.reshape(
+ gathered_pred,
+ shape=[batch_size, -1, paddle.shape(gathered_pred)[1]])
+ cur_ins_pred = ins_pred
+ cur_ins_pred = paddle.reshape(
+ cur_ins_pred,
+ shape=(paddle.shape(cur_ins_pred)[0],
+ paddle.shape(cur_ins_pred)[1], -1))
+ ins_pred_conv = paddle.matmul(gathered_pred, cur_ins_pred)
+ cur_ins_pred = paddle.reshape(
+ ins_pred_conv,
+ shape=(-1, paddle.shape(ins_pred)[-2],
+ paddle.shape(ins_pred)[-1]))
+ ins_pred_list.append(cur_ins_pred)
+
+ num_ins = paddle.sum(fg_num)
+ cate_preds = [
+ paddle.reshape(
+ paddle.transpose(cate_pred, [0, 2, 3, 1]),
+ shape=(-1, self.cate_out_channels)) for cate_pred in cate_preds
+ ]
+ flatten_cate_preds = paddle.concat(cate_preds)
+ new_cate_labels = []
+ for cate_label in cate_labels:
+ new_cate_labels.append(paddle.reshape(cate_label, shape=[-1]))
+ cate_labels = paddle.concat(new_cate_labels)
+
+ loss_ins, loss_cate = self.solov2_loss(
+ ins_pred_list, ins_labels, flatten_cate_preds, cate_labels, num_ins)
+
+ return {'loss_ins': loss_ins, 'loss_cate': loss_cate}
+
+ def get_prediction(self, cate_preds, kernel_preds, seg_pred, im_shape,
+ scale_factor):
+ """
+ Get prediction result of SOLOv2 network
+
+ Args:
+            cate_preds (list): List of Variables, output of category branch.
+            kernel_preds (list): List of Variables, output of kernel branch.
+            seg_pred (list): List of Variables, output of mask head stages.
+            im_shape (Variables): [h, w] for input images.
+            scale_factor (Variables): [scale, scale] for input images.
+        Returns:
+            seg_masks (Tensor): The predicted segmentation masks.
+            cate_labels (Tensor): The predicted category label of each segmentation.
+            cate_scores (Tensor): The predicted score of each segmentation.
+ """
+ num_levels = len(cate_preds)
+ featmap_size = paddle.shape(seg_pred)[-2:]
+ seg_masks_list = []
+ cate_labels_list = []
+ cate_scores_list = []
+ cate_preds = [cate_pred * 1.0 for cate_pred in cate_preds]
+ kernel_preds = [kernel_pred * 1.0 for kernel_pred in kernel_preds]
+ # Currently only supports batch size == 1
+ for idx in range(1):
+ cate_pred_list = [
+ paddle.reshape(
+ cate_preds[i][idx], shape=(-1, self.cate_out_channels))
+ for i in range(num_levels)
+ ]
+ seg_pred_list = seg_pred
+ kernel_pred_list = [
+ paddle.reshape(
+ paddle.transpose(kernel_preds[i][idx], [1, 2, 0]),
+ shape=(-1, self.kernel_out_channels))
+ for i in range(num_levels)
+ ]
+ cate_pred_list = paddle.concat(cate_pred_list, axis=0)
+ kernel_pred_list = paddle.concat(kernel_pred_list, axis=0)
+
+ seg_masks, cate_labels, cate_scores = self.get_seg_single(
+ cate_pred_list, seg_pred_list, kernel_pred_list, featmap_size,
+ im_shape[idx], scale_factor[idx][0])
+ bbox_num = paddle.shape(cate_labels)[0]
+ return seg_masks, cate_labels, cate_scores, bbox_num
+
+ def get_seg_single(self, cate_preds, seg_preds, kernel_preds, featmap_size,
+ im_shape, scale_factor):
+ """
+ The code of this function is based on:
+ https://github.com/WXinlong/SOLO/blob/master/mmdet/models/anchor_heads/solov2_head.py#L385
+ """
+ h = paddle.cast(im_shape[0], 'int32')[0]
+ w = paddle.cast(im_shape[1], 'int32')[0]
+ upsampled_size_out = [featmap_size[0] * 4, featmap_size[1] * 4]
+
+ y = paddle.zeros(shape=paddle.shape(cate_preds), dtype='float32')
+ inds = paddle.where(cate_preds > self.score_threshold, cate_preds, y)
+ inds = paddle.nonzero(inds)
+ cate_preds = paddle.reshape(cate_preds, shape=[-1])
+        # Append one fake entry so downstream gathers never see an empty tensor
+ ind_a = paddle.cast(paddle.shape(kernel_preds)[0], 'int64')
+ ind_b = paddle.zeros(shape=[1], dtype='int64')
+ inds_end = paddle.unsqueeze(paddle.concat([ind_a, ind_b]), 0)
+ inds = paddle.concat([inds, inds_end])
+ kernel_preds_end = paddle.ones(
+ shape=[1, self.kernel_out_channels], dtype='float32')
+ kernel_preds = paddle.concat([kernel_preds, kernel_preds_end])
+ cate_preds = paddle.concat(
+ [cate_preds, paddle.zeros(
+ shape=[1], dtype='float32')])
+
+ # cate_labels & kernel_preds
+ cate_labels = inds[:, 1]
+ kernel_preds = paddle.gather(kernel_preds, index=inds[:, 0])
+ cate_score_idx = paddle.add(inds[:, 0] * self.cate_out_channels,
+ cate_labels)
+ cate_scores = paddle.gather(cate_preds, index=cate_score_idx)
+
+ size_trans = np.power(self.seg_num_grids, 2)
+ strides = []
+ for _ind in range(len(self.segm_strides)):
+ strides.append(
+ paddle.full(
+ shape=[int(size_trans[_ind])],
+ fill_value=self.segm_strides[_ind],
+ dtype="int32"))
+ strides = paddle.concat(strides)
+ strides = paddle.concat(
+ [strides, paddle.zeros(
+ shape=[1], dtype='int32')])
+ strides = paddle.gather(strides, index=inds[:, 0])
+
+ # mask encoding.
+ kernel_preds = paddle.unsqueeze(kernel_preds, [2, 3])
+ seg_preds = F.conv2d(seg_preds, kernel_preds)
+ seg_preds = F.sigmoid(paddle.squeeze(seg_preds, [0]))
+ seg_masks = seg_preds > self.mask_threshold
+ seg_masks = paddle.cast(seg_masks, 'float32')
+ sum_masks = paddle.sum(seg_masks, axis=[1, 2])
+
+ y = paddle.zeros(shape=paddle.shape(sum_masks), dtype='float32')
+ keep = paddle.where(sum_masks > strides, sum_masks, y)
+ keep = paddle.nonzero(keep)
+ keep = paddle.squeeze(keep, axis=[1])
+        # Append one fake entry so downstream gathers never see an empty tensor
+ keep_other = paddle.concat(
+ [keep, paddle.cast(paddle.shape(sum_masks)[0] - 1, 'int64')])
+ keep_scores = paddle.concat(
+ [keep, paddle.cast(paddle.shape(sum_masks)[0], 'int64')])
+ cate_scores_end = paddle.zeros(shape=[1], dtype='float32')
+ cate_scores = paddle.concat([cate_scores, cate_scores_end])
+
+ seg_masks = paddle.gather(seg_masks, index=keep_other)
+ seg_preds = paddle.gather(seg_preds, index=keep_other)
+ sum_masks = paddle.gather(sum_masks, index=keep_other)
+ cate_labels = paddle.gather(cate_labels, index=keep_other)
+ cate_scores = paddle.gather(cate_scores, index=keep_scores)
+
+ # mask scoring.
+ seg_mul = paddle.cast(seg_preds * seg_masks, 'float32')
+ seg_scores = paddle.sum(seg_mul, axis=[1, 2]) / sum_masks
+ cate_scores *= seg_scores
+ # Matrix NMS
+ seg_preds, cate_scores, cate_labels = self.mask_nms(
+ seg_preds, seg_masks, cate_labels, cate_scores, sum_masks=sum_masks)
+ ori_shape = im_shape[:2] / scale_factor + 0.5
+ ori_shape = paddle.cast(ori_shape, 'int32')
+ seg_preds = F.interpolate(
+ paddle.unsqueeze(seg_preds, 0),
+ size=upsampled_size_out,
+ mode='bilinear',
+ align_corners=False,
+ align_mode=0)
+ seg_preds = paddle.slice(
+ seg_preds, axes=[2, 3], starts=[0, 0], ends=[h, w])
+ seg_masks = paddle.squeeze(
+ F.interpolate(
+ seg_preds,
+ size=ori_shape[:2],
+ mode='bilinear',
+ align_corners=False,
+ align_mode=0),
+ axis=[0])
+ seg_masks = paddle.cast(seg_masks > self.mask_threshold, 'uint8')
+ return seg_masks, cate_labels, cate_scores
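+
+
+# Illustrative sketch (not part of the original file): the dynamic
+# convolution at the heart of get_seg_single above. Every predicted kernel
+# (one row of `kernels`, length E) is applied to the shared mask feature
+# map as a 1x1 convolution, yielding one mask per candidate instance.
+# The shapes in the comments are assumptions for the example only.
+def _dynamic_mask_conv_sketch(seg_feats, kernels):
+    # seg_feats: [1, E, H, W] shared mask features from SOLOv2MaskHead.
+    # kernels:   [M, E] one 1x1 conv kernel per candidate instance.
+    kernels = paddle.unsqueeze(kernels, [2, 3])  # -> [M, E, 1, 1]
+    masks = F.conv2d(seg_feats, kernels)  # -> [1, M, H, W]
+    return F.sigmoid(paddle.squeeze(masks, [0]))  # -> [M, H, W]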
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/sparsercnn_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/sparsercnn_head.py
new file mode 100644
index 000000000..377cf27fc
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/sparsercnn_head.py
@@ -0,0 +1,375 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/PeizeSun/SparseR-CNN/blob/main/projects/SparseRCNN/sparsercnn/head.py
+The copyright of PeizeSun/SparseR-CNN is as follows:
+MIT License [see LICENSE for details]
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import copy
+import paddle
+import paddle.nn as nn
+
+from ppdet.core.workspace import register
+from ppdet.modeling.heads.roi_extractor import RoIAlign
+from ppdet.modeling.bbox_utils import delta2bbox
+from .. import initializer as init
+
+_DEFAULT_SCALE_CLAMP = math.log(100000. / 16)
+
+
+class DynamicConv(nn.Layer):
+ def __init__(
+ self,
+ head_hidden_dim,
+ head_dim_dynamic,
+ head_num_dynamic, ):
+ super().__init__()
+
+ self.hidden_dim = head_hidden_dim
+ self.dim_dynamic = head_dim_dynamic
+ self.num_dynamic = head_num_dynamic
+ self.num_params = self.hidden_dim * self.dim_dynamic
+ self.dynamic_layer = nn.Linear(self.hidden_dim,
+ self.num_dynamic * self.num_params)
+
+ self.norm1 = nn.LayerNorm(self.dim_dynamic)
+ self.norm2 = nn.LayerNorm(self.hidden_dim)
+
+ self.activation = nn.ReLU()
+
+ pooler_resolution = 7
+ num_output = self.hidden_dim * pooler_resolution**2
+ self.out_layer = nn.Linear(num_output, self.hidden_dim)
+ self.norm3 = nn.LayerNorm(self.hidden_dim)
+
+ def forward(self, pro_features, roi_features):
+ '''
+ pro_features: (1, N * nr_boxes, self.d_model)
+ roi_features: (49, N * nr_boxes, self.d_model)
+ '''
+ features = roi_features.transpose(perm=[1, 0, 2])
+ parameters = self.dynamic_layer(pro_features).transpose(perm=[1, 0, 2])
+
+ param1 = parameters[:, :, :self.num_params].reshape(
+ [-1, self.hidden_dim, self.dim_dynamic])
+ param2 = parameters[:, :, self.num_params:].reshape(
+ [-1, self.dim_dynamic, self.hidden_dim])
+
+ features = paddle.bmm(features, param1)
+ features = self.norm1(features)
+ features = self.activation(features)
+
+ features = paddle.bmm(features, param2)
+ features = self.norm2(features)
+ features = self.activation(features)
+
+ features = features.flatten(1)
+ features = self.out_layer(features)
+ features = self.norm3(features)
+ features = self.activation(features)
+
+ return features
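+
+
+# Illustrative note (not part of the original file): what the two bmm calls
+# above amount to, with assumed sizes hidden_dim=256, dim_dynamic=64 and a
+# 7x7 RoI (49 positions):
+#
+#   features:         [N * nr_boxes, 49, 256]
+#   param1 (per box): [N * nr_boxes, 256, 64] -> bmm gives [.., 49, 64]
+#   param2 (per box): [N * nr_boxes, 64, 256] -> bmm gives [.., 49, 256]
+#
+# i.e. each proposal filters its own RoI features with weights generated
+# from its proposal feature, rather than sharing one static convolution.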
+
+
+class RCNNHead(nn.Layer):
+ def __init__(
+ self,
+ d_model,
+ num_classes,
+ dim_feedforward,
+ nhead,
+ dropout,
+ head_cls,
+ head_reg,
+ head_dim_dynamic,
+ head_num_dynamic,
+ scale_clamp: float=_DEFAULT_SCALE_CLAMP,
+ bbox_weights=(2.0, 2.0, 1.0, 1.0), ):
+ super().__init__()
+
+ self.d_model = d_model
+
+ # dynamic.
+ self.self_attn = nn.MultiHeadAttention(d_model, nhead, dropout=dropout)
+ self.inst_interact = DynamicConv(d_model, head_dim_dynamic,
+ head_num_dynamic)
+
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
+ self.dropout = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+ self.norm1 = nn.LayerNorm(d_model)
+ self.norm2 = nn.LayerNorm(d_model)
+ self.norm3 = nn.LayerNorm(d_model)
+ self.dropout1 = nn.Dropout(dropout)
+ self.dropout2 = nn.Dropout(dropout)
+ self.dropout3 = nn.Dropout(dropout)
+
+ self.activation = nn.ReLU()
+
+ # cls.
+ num_cls = head_cls
+ cls_module = list()
+ for _ in range(num_cls):
+ cls_module.append(nn.Linear(d_model, d_model, bias_attr=False))
+ cls_module.append(nn.LayerNorm(d_model))
+ cls_module.append(nn.ReLU())
+ self.cls_module = nn.LayerList(cls_module)
+
+ # reg.
+ num_reg = head_reg
+ reg_module = list()
+ for _ in range(num_reg):
+ reg_module.append(nn.Linear(d_model, d_model, bias_attr=False))
+ reg_module.append(nn.LayerNorm(d_model))
+ reg_module.append(nn.ReLU())
+ self.reg_module = nn.LayerList(reg_module)
+
+ # pred.
+ self.class_logits = nn.Linear(d_model, num_classes)
+ self.bboxes_delta = nn.Linear(d_model, 4)
+ self.scale_clamp = scale_clamp
+ self.bbox_weights = bbox_weights
+
+ def forward(self, features, bboxes, pro_features, pooler):
+ """
+ :param bboxes: (N, nr_boxes, 4)
+ :param pro_features: (N, nr_boxes, d_model)
+ """
+
+ N, nr_boxes = bboxes.shape[:2]
+
+ proposal_boxes = list()
+ for b in range(N):
+ proposal_boxes.append(bboxes[b])
+ roi_num = paddle.full([N], nr_boxes).astype("int32")
+
+ roi_features = pooler(features, proposal_boxes, roi_num)
+ roi_features = roi_features.reshape(
+ [N * nr_boxes, self.d_model, -1]).transpose(perm=[2, 0, 1])
+
+ # self_att.
+ pro_features = pro_features.reshape([N, nr_boxes, self.d_model])
+ pro_features2 = self.self_attn(
+ pro_features, pro_features, value=pro_features)
+ pro_features = pro_features.transpose(perm=[1, 0, 2]) + self.dropout1(
+ pro_features2.transpose(perm=[1, 0, 2]))
+ pro_features = self.norm1(pro_features)
+
+ # inst_interact.
+ pro_features = pro_features.reshape(
+ [nr_boxes, N, self.d_model]).transpose(perm=[1, 0, 2]).reshape(
+ [1, N * nr_boxes, self.d_model])
+ pro_features2 = self.inst_interact(pro_features, roi_features)
+ pro_features = pro_features + self.dropout2(pro_features2)
+ obj_features = self.norm2(pro_features)
+
+ # obj_feature.
+ obj_features2 = self.linear2(
+ self.dropout(self.activation(self.linear1(obj_features))))
+ obj_features = obj_features + self.dropout3(obj_features2)
+ obj_features = self.norm3(obj_features)
+
+ fc_feature = obj_features.transpose(perm=[1, 0, 2]).reshape(
+ [N * nr_boxes, -1])
+ cls_feature = fc_feature.clone()
+ reg_feature = fc_feature.clone()
+ for cls_layer in self.cls_module:
+ cls_feature = cls_layer(cls_feature)
+ for reg_layer in self.reg_module:
+ reg_feature = reg_layer(reg_feature)
+ class_logits = self.class_logits(cls_feature)
+ bboxes_deltas = self.bboxes_delta(reg_feature)
+ pred_bboxes = delta2bbox(bboxes_deltas,
+ bboxes.reshape([-1, 4]), self.bbox_weights)
+
+ return class_logits.reshape([N, nr_boxes, -1]), pred_bboxes.reshape(
+ [N, nr_boxes, -1]), obj_features
+
+
+@register
+class SparseRCNNHead(nn.Layer):
+ '''
+    SparseRCNNHead
+ Args:
+ roi_input_shape (list[ShapeSpec]): The output shape of fpn
+ num_classes (int): Number of classes,
+ head_hidden_dim (int): The param of MultiHeadAttention,
+ head_dim_feedforward (int): The param of MultiHeadAttention,
+ nhead (int): The param of MultiHeadAttention,
+ head_dropout (float): The p of dropout,
+        head_cls (int): The number of class head layers,
+        head_reg (int): The number of regression head layers,
+ head_num_dynamic (int): The number of DynamicConv's param,
+ head_num_heads (int): The number of RCNNHead,
+        deep_supervision (bool): whether to supervise the intermediate results,
+ num_proposals (int): the number of proposals boxes and features
+ '''
+ __inject__ = ['loss_func']
+ __shared__ = ['num_classes']
+
+ def __init__(
+ self,
+ head_hidden_dim,
+ head_dim_feedforward,
+ nhead,
+ head_dropout,
+ head_cls,
+ head_reg,
+ head_dim_dynamic,
+ head_num_dynamic,
+ head_num_heads,
+ deep_supervision,
+ num_proposals,
+ num_classes=80,
+ loss_func="SparseRCNNLoss",
+ roi_input_shape=None, ):
+ super().__init__()
+
+ # Build RoI.
+ box_pooler = self._init_box_pooler(roi_input_shape)
+ self.box_pooler = box_pooler
+
+ # Build heads.
+ rcnn_head = RCNNHead(
+ head_hidden_dim,
+ num_classes,
+ head_dim_feedforward,
+ nhead,
+ head_dropout,
+ head_cls,
+ head_reg,
+ head_dim_dynamic,
+ head_num_dynamic, )
+ self.head_series = nn.LayerList(
+ [copy.deepcopy(rcnn_head) for i in range(head_num_heads)])
+ self.return_intermediate = deep_supervision
+
+ self.num_classes = num_classes
+
+ # build init proposal
+ self.init_proposal_features = nn.Embedding(num_proposals,
+ head_hidden_dim)
+ self.init_proposal_boxes = nn.Embedding(num_proposals, 4)
+
+ self.lossfunc = loss_func
+
+ # Init parameters.
+ init.reset_initialized_parameter(self)
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ # init all parameters.
+ prior_prob = 0.01
+ bias_value = -math.log((1 - prior_prob) / prior_prob)
+
+ for m in self.sublayers():
+ if isinstance(m, nn.Linear):
+ init.xavier_normal_(m.weight, reverse=True)
+ elif not isinstance(m, nn.Embedding) and hasattr(
+ m, "weight") and m.weight.dim() > 1:
+ init.xavier_normal_(m.weight, reverse=False)
+
+ if hasattr(m, "bias") and m.bias is not None and m.bias.shape[
+ -1] == self.num_classes:
+ init.constant_(m.bias, bias_value)
+
+ init_bboxes = paddle.empty_like(self.init_proposal_boxes.weight)
+ init_bboxes[:, :2] = 0.5
+ init_bboxes[:, 2:] = 1.0
+ self.init_proposal_boxes.weight.set_value(init_bboxes)
+
+ @staticmethod
+ def _init_box_pooler(input_shape):
+
+ pooler_resolution = 7
+ sampling_ratio = 2
+
+ if input_shape is not None:
+ pooler_scales = tuple(1.0 / input_shape[k].stride
+ for k in range(len(input_shape)))
+ in_channels = [
+ input_shape[f].channels for f in range(len(input_shape))
+ ]
+ end_level = len(input_shape) - 1
+ # Check all channel counts are equal
+ assert len(set(in_channels)) == 1, in_channels
+ else:
+ pooler_scales = [1.0 / 4.0, 1.0 / 8.0, 1.0 / 16.0, 1.0 / 32.0]
+ end_level = 3
+
+ box_pooler = RoIAlign(
+ resolution=pooler_resolution,
+ spatial_scale=pooler_scales,
+ sampling_ratio=sampling_ratio,
+ end_level=end_level,
+ aligned=True)
+ return box_pooler
+
+ def forward(self, features, input_whwh):
+
+ bs = len(features[0])
+ bboxes = box_cxcywh_to_xyxy(self.init_proposal_boxes.weight.clone(
+ )).unsqueeze(0)
+ bboxes = bboxes * input_whwh.unsqueeze(-2)
+
+ init_features = self.init_proposal_features.weight.unsqueeze(0).tile(
+ [1, bs, 1])
+ proposal_features = init_features.clone()
+
+ inter_class_logits = []
+ inter_pred_bboxes = []
+
+ for rcnn_head in self.head_series:
+ class_logits, pred_bboxes, proposal_features = rcnn_head(
+ features, bboxes, proposal_features, self.box_pooler)
+
+ if self.return_intermediate:
+ inter_class_logits.append(class_logits)
+ inter_pred_bboxes.append(pred_bboxes)
+ bboxes = pred_bboxes.detach()
+
+ output = {
+ 'pred_logits': inter_class_logits[-1],
+ 'pred_boxes': inter_pred_bboxes[-1]
+ }
+ if self.return_intermediate:
+ output['aux_outputs'] = [{
+ 'pred_logits': a,
+ 'pred_boxes': b
+ } for a, b in zip(inter_class_logits[:-1], inter_pred_bboxes[:-1])]
+
+ return output
+
+ def get_loss(self, outputs, targets):
+ losses = self.lossfunc(outputs, targets)
+ weight_dict = self.lossfunc.weight_dict
+
+ for k in losses.keys():
+ if k in weight_dict:
+ losses[k] *= weight_dict[k]
+
+ return losses
+
+
+def box_cxcywh_to_xyxy(x):
+ x_c, y_c, w, h = x.unbind(-1)
+ b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
+ return paddle.stack(b, axis=-1)
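+
+
+# Illustrative check (not part of the original file):
+#
+#   box_cxcywh_to_xyxy(paddle.to_tensor([[0.5, 0.5, 0.2, 0.4]]))
+#   # -> [[0.4, 0.3, 0.6, 0.7]]
+#
+# since (cx, cy, w, h) = (0.5, 0.5, 0.2, 0.4) gives x1 = 0.5 - 0.1,
+# y1 = 0.5 - 0.2, x2 = 0.5 + 0.1, y2 = 0.5 + 0.2.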
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/ssd_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/ssd_head.py
new file mode 100644
index 000000000..07e7e92f9
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/ssd_head.py
@@ -0,0 +1,215 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register
+from paddle.regularizer import L2Decay
+from paddle import ParamAttr
+
+from ..layers import AnchorGeneratorSSD
+
+
+class SepConvLayer(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ padding=1,
+ conv_decay=0.):
+ super(SepConvLayer, self).__init__()
+ self.dw_conv = nn.Conv2D(
+ in_channels=in_channels,
+ out_channels=in_channels,
+ kernel_size=kernel_size,
+ stride=1,
+ padding=padding,
+ groups=in_channels,
+ weight_attr=ParamAttr(regularizer=L2Decay(conv_decay)),
+ bias_attr=False)
+
+ self.bn = nn.BatchNorm2D(
+ in_channels,
+ weight_attr=ParamAttr(regularizer=L2Decay(0.)),
+ bias_attr=ParamAttr(regularizer=L2Decay(0.)))
+
+ self.pw_conv = nn.Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ weight_attr=ParamAttr(regularizer=L2Decay(conv_decay)),
+ bias_attr=False)
+
+ def forward(self, x):
+ x = self.dw_conv(x)
+ x = F.relu6(self.bn(x))
+ x = self.pw_conv(x)
+ return x
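+
+
+# Illustrative note (not part of the original file): for in_channels =
+# out_channels = 256 with a 3x3 kernel, SepConvLayer needs about
+# 3*3*256 = 2,304 depthwise plus 256*256 = 65,536 pointwise weights
+# (~68k), versus 3*3*256*256 ~= 590k for a dense 3x3 convolution --
+# roughly an 8.7x reduction, which is why the lite SSD heads use it.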
+
+
+class SSDExtraHead(nn.Layer):
+ def __init__(self,
+ in_channels=256,
+ out_channels=([256, 512], [256, 512], [128, 256], [128, 256],
+ [128, 256]),
+ strides=(2, 2, 2, 1, 1),
+ paddings=(1, 1, 1, 0, 0)):
+ super(SSDExtraHead, self).__init__()
+ self.convs = nn.LayerList()
+ for out_channel, stride, padding in zip(out_channels, strides,
+ paddings):
+ self.convs.append(
+ self._make_layers(in_channels, out_channel[0], out_channel[1],
+ stride, padding))
+ in_channels = out_channel[-1]
+
+ def _make_layers(self, c_in, c_hidden, c_out, stride_3x3, padding_3x3):
+ return nn.Sequential(
+ nn.Conv2D(c_in, c_hidden, 1),
+ nn.ReLU(),
+ nn.Conv2D(c_hidden, c_out, 3, stride_3x3, padding_3x3), nn.ReLU())
+
+ def forward(self, x):
+ out = [x]
+ for conv_layer in self.convs:
+ out.append(conv_layer(out[-1]))
+ return out
+
+
+@register
+class SSDHead(nn.Layer):
+ """
+ SSDHead
+
+ Args:
+ num_classes (int): Number of classes
+ in_channels (list): Number of channels per input feature
+ anchor_generator (dict): Configuration of 'AnchorGeneratorSSD' instance
+ kernel_size (int): Conv kernel size
+ padding (int): Conv padding
+ use_sepconv (bool): Use SepConvLayer if true
+ conv_decay (float): Conv regularization coeff
+ loss (object): 'SSDLoss' instance
+        use_extra_head (bool): If you use ResNet34 as the backbone, you should set `use_extra_head`=True
+ """
+
+ __shared__ = ['num_classes']
+ __inject__ = ['anchor_generator', 'loss']
+
+ def __init__(self,
+ num_classes=80,
+ in_channels=(512, 1024, 512, 256, 256, 256),
+ anchor_generator=AnchorGeneratorSSD().__dict__,
+ kernel_size=3,
+ padding=1,
+ use_sepconv=False,
+ conv_decay=0.,
+ loss='SSDLoss',
+ use_extra_head=False):
+ super(SSDHead, self).__init__()
+ # add background class
+ self.num_classes = num_classes + 1
+ self.in_channels = in_channels
+ self.anchor_generator = anchor_generator
+ self.loss = loss
+ self.use_extra_head = use_extra_head
+
+ if self.use_extra_head:
+ self.ssd_extra_head = SSDExtraHead()
+ self.in_channels = [256, 512, 512, 256, 256, 256]
+
+ if isinstance(anchor_generator, dict):
+ self.anchor_generator = AnchorGeneratorSSD(**anchor_generator)
+
+ self.num_priors = self.anchor_generator.num_priors
+ self.box_convs = []
+ self.score_convs = []
+ for i, num_prior in enumerate(self.num_priors):
+ box_conv_name = "boxes{}".format(i)
+ if not use_sepconv:
+ box_conv = self.add_sublayer(
+ box_conv_name,
+ nn.Conv2D(
+ in_channels=self.in_channels[i],
+ out_channels=num_prior * 4,
+ kernel_size=kernel_size,
+ padding=padding))
+ else:
+ box_conv = self.add_sublayer(
+ box_conv_name,
+ SepConvLayer(
+ in_channels=self.in_channels[i],
+ out_channels=num_prior * 4,
+ kernel_size=kernel_size,
+ padding=padding,
+ conv_decay=conv_decay))
+ self.box_convs.append(box_conv)
+
+ score_conv_name = "scores{}".format(i)
+ if not use_sepconv:
+ score_conv = self.add_sublayer(
+ score_conv_name,
+ nn.Conv2D(
+ in_channels=self.in_channels[i],
+ out_channels=num_prior * self.num_classes,
+ kernel_size=kernel_size,
+ padding=padding))
+ else:
+ score_conv = self.add_sublayer(
+ score_conv_name,
+ SepConvLayer(
+ in_channels=self.in_channels[i],
+ out_channels=num_prior * self.num_classes,
+ kernel_size=kernel_size,
+ padding=padding,
+ conv_decay=conv_decay))
+ self.score_convs.append(score_conv)
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape], }
+
+ def forward(self, feats, image, gt_bbox=None, gt_class=None):
+ if self.use_extra_head:
+ assert len(feats) == 1, \
+ ("If you set use_extra_head=True, backbone feature "
+ "list length should be 1.")
+ feats = self.ssd_extra_head(feats[0])
+ box_preds = []
+ cls_scores = []
+ for feat, box_conv, score_conv in zip(feats, self.box_convs,
+ self.score_convs):
+ box_pred = box_conv(feat)
+ box_pred = paddle.transpose(box_pred, [0, 2, 3, 1])
+ box_pred = paddle.reshape(box_pred, [0, -1, 4])
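+            # NB: in paddle.reshape a 0 keeps that dimension unchanged
+            # (here the batch size), so box_pred is [N, H*W*num_prior, 4].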
+ box_preds.append(box_pred)
+
+ cls_score = score_conv(feat)
+ cls_score = paddle.transpose(cls_score, [0, 2, 3, 1])
+ cls_score = paddle.reshape(cls_score, [0, -1, self.num_classes])
+ cls_scores.append(cls_score)
+
+ prior_boxes = self.anchor_generator(feats, image)
+
+ if self.training:
+ return self.get_loss(box_preds, cls_scores, gt_bbox, gt_class,
+ prior_boxes)
+ else:
+ return (box_preds, cls_scores), prior_boxes
+
+ def get_loss(self, boxes, scores, gt_bbox, gt_class, prior_boxes):
+ return self.loss(boxes, scores, gt_bbox, gt_class, prior_boxes)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/tood_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/tood_head.py
new file mode 100644
index 000000000..b9dbd17e3
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/tood_head.py
@@ -0,0 +1,425 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import Constant
+
+from ppdet.core.workspace import register
+from ..initializer import normal_, constant_, bias_init_with_prob
+from ppdet.modeling.bbox_utils import bbox_center
+from ..losses import GIoULoss
+from paddle.vision.ops import deform_conv2d
+from ppdet.modeling.layers import ConvNormLayer
+
+
+class ScaleReg(nn.Layer):
+ """
+ Parameter for scaling the regression outputs.
+ """
+
+ def __init__(self, init_scale=1.):
+ super(ScaleReg, self).__init__()
+ self.scale_reg = self.create_parameter(
+ shape=[1],
+ attr=ParamAttr(initializer=Constant(value=init_scale)),
+ dtype="float32")
+
+ def forward(self, inputs):
+ out = inputs * self.scale_reg
+ return out
+
+
+class TaskDecomposition(nn.Layer):
+ """This code is based on
+ https://github.com/fcjian/TOOD/blob/master/mmdet/models/dense_heads/tood_head.py
+ """
+
+ def __init__(
+ self,
+ feat_channels,
+ stacked_convs,
+ la_down_rate=8,
+ norm_type='gn',
+ norm_groups=32, ):
+ super(TaskDecomposition, self).__init__()
+ self.feat_channels = feat_channels
+ self.stacked_convs = stacked_convs
+ self.norm_type = norm_type
+ self.norm_groups = norm_groups
+ self.in_channels = self.feat_channels * self.stacked_convs
+ self.la_conv1 = nn.Conv2D(self.in_channels,
+ self.in_channels // la_down_rate, 1)
+ self.la_conv2 = nn.Conv2D(self.in_channels // la_down_rate,
+ self.stacked_convs, 1)
+
+ self.reduction_conv = ConvNormLayer(
+ self.in_channels,
+ self.feat_channels,
+ filter_size=1,
+ stride=1,
+ norm_type=self.norm_type,
+ norm_groups=self.norm_groups)
+
+ self._init_weights()
+
+ def _init_weights(self):
+ normal_(self.la_conv1.weight, std=0.001)
+ normal_(self.la_conv2.weight, std=0.001)
+
+ def forward(self, feat, avg_feat=None):
+ b, _, h, w = feat.shape
+ if avg_feat is None:
+ avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
+ weight = F.relu(self.la_conv1(avg_feat))
+ weight = F.sigmoid(self.la_conv2(weight))
+
+ # here new_conv_weight = layer_attention_weight * conv_weight
+ # in order to save memory and FLOPs.
+ conv_weight = weight.reshape([b, 1, self.stacked_convs, 1]) * \
+ self.reduction_conv.conv.weight.reshape(
+ [1, self.feat_channels, self.stacked_convs, self.feat_channels])
+ conv_weight = conv_weight.reshape(
+ [b, self.feat_channels, self.in_channels])
+ feat = feat.reshape([b, self.in_channels, h * w])
+ feat = paddle.bmm(conv_weight, feat).reshape(
+ [b, self.feat_channels, h, w])
+ if self.norm_type is not None:
+ feat = self.reduction_conv.norm(feat)
+ feat = F.relu(feat)
+ return feat
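+
+
+# Illustrative note (not part of the original file): the reshape/bmm above
+# folds the per-image layer-attention weights into the 1x1 reduction conv
+# instead of materializing a re-weighted [b, C*S, h, w] feature map. With
+# feat_channels C and stacked_convs S:
+#
+#   conv_weight: [b, C, S*C] = attention [b, 1, S, 1] * weight [1, C, S, C]
+#   feat:        [b, S*C, h*w]
+#   bmm ->       [b, C, h*w], reshaped back to [b, C, h, w]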
+
+
+@register
+class TOODHead(nn.Layer):
+ """This code is based on
+ https://github.com/fcjian/TOOD/blob/master/mmdet/models/dense_heads/tood_head.py
+ """
+ __inject__ = ['nms', 'static_assigner', 'assigner']
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ num_classes=80,
+ feat_channels=256,
+ stacked_convs=6,
+ fpn_strides=(8, 16, 32, 64, 128),
+ grid_cell_scale=8,
+ grid_cell_offset=0.5,
+ norm_type='gn',
+ norm_groups=32,
+ static_assigner_epoch=4,
+ use_align_head=True,
+ loss_weight={
+ 'class': 1.0,
+ 'bbox': 1.0,
+ 'iou': 2.0,
+ },
+ nms='MultiClassNMS',
+ static_assigner='ATSSAssigner',
+ assigner='TaskAlignedAssigner'):
+ super(TOODHead, self).__init__()
+ self.num_classes = num_classes
+ self.feat_channels = feat_channels
+ self.stacked_convs = stacked_convs
+ self.fpn_strides = fpn_strides
+ self.grid_cell_scale = grid_cell_scale
+ self.grid_cell_offset = grid_cell_offset
+ self.static_assigner_epoch = static_assigner_epoch
+ self.use_align_head = use_align_head
+ self.nms = nms
+ self.static_assigner = static_assigner
+ self.assigner = assigner
+ self.loss_weight = loss_weight
+ self.giou_loss = GIoULoss()
+
+ self.inter_convs = nn.LayerList()
+ for i in range(self.stacked_convs):
+ self.inter_convs.append(
+ ConvNormLayer(
+ self.feat_channels,
+ self.feat_channels,
+ filter_size=3,
+ stride=1,
+ norm_type=norm_type,
+ norm_groups=norm_groups))
+
+ self.cls_decomp = TaskDecomposition(
+ self.feat_channels,
+ self.stacked_convs,
+ self.stacked_convs * 8,
+ norm_type=norm_type,
+ norm_groups=norm_groups)
+ self.reg_decomp = TaskDecomposition(
+ self.feat_channels,
+ self.stacked_convs,
+ self.stacked_convs * 8,
+ norm_type=norm_type,
+ norm_groups=norm_groups)
+
+ self.tood_cls = nn.Conv2D(
+ self.feat_channels, self.num_classes, 3, padding=1)
+ self.tood_reg = nn.Conv2D(self.feat_channels, 4, 3, padding=1)
+
+ if self.use_align_head:
+ self.cls_prob_conv1 = nn.Conv2D(self.feat_channels *
+ self.stacked_convs,
+ self.feat_channels // 4, 1)
+ self.cls_prob_conv2 = nn.Conv2D(
+ self.feat_channels // 4, 1, 3, padding=1)
+ self.reg_offset_conv1 = nn.Conv2D(self.feat_channels *
+ self.stacked_convs,
+ self.feat_channels // 4, 1)
+ self.reg_offset_conv2 = nn.Conv2D(
+ self.feat_channels // 4, 4 * 2, 3, padding=1)
+
+ self.scales_regs = nn.LayerList([ScaleReg() for _ in self.fpn_strides])
+
+ self._init_weights()
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {
+ 'feat_channels': input_shape[0].channels,
+ 'fpn_strides': [i.stride for i in input_shape],
+ }
+
+ def _init_weights(self):
+ bias_cls = bias_init_with_prob(0.01)
+ normal_(self.tood_cls.weight, std=0.01)
+ constant_(self.tood_cls.bias, bias_cls)
+ normal_(self.tood_reg.weight, std=0.01)
+
+ if self.use_align_head:
+ normal_(self.cls_prob_conv1.weight, std=0.01)
+ normal_(self.cls_prob_conv2.weight, std=0.01)
+ constant_(self.cls_prob_conv2.bias, bias_cls)
+ normal_(self.reg_offset_conv1.weight, std=0.001)
+ normal_(self.reg_offset_conv2.weight, std=0.001)
+ constant_(self.reg_offset_conv2.bias)
+
+ def _generate_anchors(self, feats):
+ anchors, num_anchors_list = [], []
+ stride_tensor_list = []
+ for feat, stride in zip(feats, self.fpn_strides):
+ _, _, h, w = feat.shape
+ cell_half_size = self.grid_cell_scale * stride * 0.5
+ shift_x = (paddle.arange(end=w) + self.grid_cell_offset) * stride
+ shift_y = (paddle.arange(end=h) + self.grid_cell_offset) * stride
+ shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)
+ anchor = paddle.stack(
+ [
+ shift_x - cell_half_size, shift_y - cell_half_size,
+ shift_x + cell_half_size, shift_y + cell_half_size
+ ],
+ axis=-1)
+ anchors.append(anchor.reshape([-1, 4]))
+ num_anchors_list.append(len(anchors[-1]))
+ stride_tensor_list.append(
+ paddle.full([num_anchors_list[-1], 1], stride))
+ return anchors, num_anchors_list, stride_tensor_list
+
+ @staticmethod
+ def _batch_distance2bbox(points, distance, max_shapes=None):
+ """Decode distance prediction to bounding box.
+ Args:
+ points (Tensor): [B, l, 2]
+ distance (Tensor): [B, l, 4]
+            max_shapes (tuple): [B, 2], "h w" format, shape of the image.
+ Returns:
+ Tensor: Decoded bboxes.
+ """
+ x1 = points[:, :, 0] - distance[:, :, 0]
+ y1 = points[:, :, 1] - distance[:, :, 1]
+ x2 = points[:, :, 0] + distance[:, :, 2]
+ y2 = points[:, :, 1] + distance[:, :, 3]
+ bboxes = paddle.stack([x1, y1, x2, y2], -1)
+ if max_shapes is not None:
+ out_bboxes = []
+ for bbox, max_shape in zip(bboxes, max_shapes):
+ bbox[:, 0] = bbox[:, 0].clip(min=0, max=max_shape[1])
+ bbox[:, 1] = bbox[:, 1].clip(min=0, max=max_shape[0])
+ bbox[:, 2] = bbox[:, 2].clip(min=0, max=max_shape[1])
+ bbox[:, 3] = bbox[:, 3].clip(min=0, max=max_shape[0])
+ out_bboxes.append(bbox)
+ out_bboxes = paddle.stack(out_bboxes)
+ return out_bboxes
+ return bboxes
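+
+    # Worked example (illustrative): a point at (10, 10) with distances
+    # (l, t, r, b) = (2, 3, 4, 5) decodes to the box
+    # (x1, y1, x2, y2) = (8, 7, 14, 15).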
+
+ @staticmethod
+ def _deform_sampling(feat, offset):
+ """ Sampling the feature according to offset.
+ Args:
+ feat (Tensor): Feature
+            offset (Tensor): Spatial offset for feature sampling
+ """
+ # it is an equivalent implementation of bilinear interpolation
+ # you can also use F.grid_sample instead
+ c = feat.shape[1]
+ weight = paddle.ones([c, 1, 1, 1])
+ y = deform_conv2d(feat, offset, weight, deformable_groups=c, groups=c)
+ return y
+
+ def forward(self, feats):
+ assert len(feats) == len(self.fpn_strides), \
+ "The size of feats is not equal to size of fpn_strides"
+
+ anchors, num_anchors_list, stride_tensor_list = self._generate_anchors(
+ feats)
+ cls_score_list, bbox_pred_list = [], []
+ for feat, scale_reg, anchor, stride in zip(feats, self.scales_regs,
+ anchors, self.fpn_strides):
+ b, _, h, w = feat.shape
+ inter_feats = []
+ for inter_conv in self.inter_convs:
+ feat = F.relu(inter_conv(feat))
+ inter_feats.append(feat)
+ feat = paddle.concat(inter_feats, axis=1)
+
+ # task decomposition
+ avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
+ cls_feat = self.cls_decomp(feat, avg_feat)
+ reg_feat = self.reg_decomp(feat, avg_feat)
+
+ # cls prediction and alignment
+ cls_logits = self.tood_cls(cls_feat)
+ if self.use_align_head:
+ cls_prob = F.relu(self.cls_prob_conv1(feat))
+ cls_prob = F.sigmoid(self.cls_prob_conv2(cls_prob))
+ cls_score = (F.sigmoid(cls_logits) * cls_prob).sqrt()
+ else:
+ cls_score = F.sigmoid(cls_logits)
+ cls_score_list.append(cls_score.flatten(2).transpose([0, 2, 1]))
+
+ # reg prediction and alignment
+ reg_dist = scale_reg(self.tood_reg(reg_feat).exp())
+ reg_dist = reg_dist.transpose([0, 2, 3, 1]).reshape([b, -1, 4])
+ anchor_centers = bbox_center(anchor).unsqueeze(0) / stride
+ reg_bbox = self._batch_distance2bbox(
+ anchor_centers.tile([b, 1, 1]), reg_dist)
+ if self.use_align_head:
+ reg_bbox = reg_bbox.reshape([b, h, w, 4]).transpose(
+ [0, 3, 1, 2])
+ reg_offset = F.relu(self.reg_offset_conv1(feat))
+ reg_offset = self.reg_offset_conv2(reg_offset)
+ bbox_pred = self._deform_sampling(reg_bbox, reg_offset)
+ bbox_pred = bbox_pred.flatten(2).transpose([0, 2, 1])
+ else:
+ bbox_pred = reg_bbox
+
+ if not self.training:
+ bbox_pred *= stride
+ bbox_pred_list.append(bbox_pred)
+ cls_score_list = paddle.concat(cls_score_list, axis=1)
+ bbox_pred_list = paddle.concat(bbox_pred_list, axis=1)
+ anchors = paddle.concat(anchors)
+ anchors.stop_gradient = True
+ stride_tensor_list = paddle.concat(stride_tensor_list).unsqueeze(0)
+ stride_tensor_list.stop_gradient = True
+
+ return cls_score_list, bbox_pred_list, anchors, num_anchors_list, stride_tensor_list
+
+ @staticmethod
+ def _focal_loss(score, label, alpha=0.25, gamma=2.0):
+ weight = (score - label).pow(gamma)
+ if alpha > 0:
+ alpha_t = alpha * label + (1 - alpha) * (1 - label)
+ weight *= alpha_t
+ loss = F.binary_cross_entropy(
+ score, label, weight=weight, reduction='sum')
+ return loss
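+
+    # Worked example (illustrative): with gamma=2, a well-classified
+    # position (score 0.9, soft label 1.0) gets weight (0.9 - 1.0)^2 =
+    # 0.01, so its BCE term is down-weighted 100x compared to a position
+    # predicted at 0.0 -- the usual focal-loss emphasis on hard examples.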
+
+ def get_loss(self, head_outs, gt_meta):
+ pred_scores, pred_bboxes, anchors, num_anchors_list, stride_tensor_list = head_outs
+ gt_labels = gt_meta['gt_class']
+ gt_bboxes = gt_meta['gt_bbox']
+ # label assignment
+ if gt_meta['epoch_id'] < self.static_assigner_epoch:
+ assigned_labels, assigned_bboxes, assigned_scores = self.static_assigner(
+ anchors,
+ num_anchors_list,
+ gt_labels,
+ gt_bboxes,
+ bg_index=self.num_classes)
+ alpha_l = 0.25
+ else:
+ assigned_labels, assigned_bboxes, assigned_scores = self.assigner(
+ pred_scores.detach(),
+ pred_bboxes.detach() * stride_tensor_list,
+ bbox_center(anchors),
+ gt_labels,
+ gt_bboxes,
+ bg_index=self.num_classes)
+ alpha_l = -1
+
+ # rescale bbox
+ assigned_bboxes /= stride_tensor_list
+ # classification loss
+ loss_cls = self._focal_loss(pred_scores, assigned_scores, alpha=alpha_l)
+ # select positive samples mask
+ mask_positive = (assigned_labels != self.num_classes)
+ num_pos = mask_positive.astype(paddle.float32).sum()
+ # bbox regression loss
+ if num_pos > 0:
+ bbox_mask = mask_positive.unsqueeze(-1).tile([1, 1, 4])
+ pred_bboxes_pos = paddle.masked_select(pred_bboxes,
+ bbox_mask).reshape([-1, 4])
+ assigned_bboxes_pos = paddle.masked_select(
+ assigned_bboxes, bbox_mask).reshape([-1, 4])
+ bbox_weight = paddle.masked_select(
+ assigned_scores.sum(-1), mask_positive).unsqueeze(-1)
+ # iou loss
+ loss_iou = self.giou_loss(pred_bboxes_pos,
+ assigned_bboxes_pos) * bbox_weight
+ loss_iou = loss_iou.sum() / bbox_weight.sum()
+ # l1 loss
+ loss_l1 = F.l1_loss(pred_bboxes_pos, assigned_bboxes_pos)
+ else:
+ loss_iou = paddle.zeros([1])
+ loss_l1 = paddle.zeros([1])
+
+ loss_cls /= assigned_scores.sum().clip(min=1)
+ loss = self.loss_weight['class'] * loss_cls + self.loss_weight[
+ 'iou'] * loss_iou
+
+ return {
+ 'loss': loss,
+ 'loss_class': loss_cls,
+ 'loss_iou': loss_iou,
+ 'loss_l1': loss_l1
+ }
+
+ def post_process(self, head_outs, img_shape, scale_factor):
+ pred_scores, pred_bboxes, _, _, _ = head_outs
+ pred_scores = pred_scores.transpose([0, 2, 1])
+
+ for i in range(len(pred_bboxes)):
+ pred_bboxes[i, :, 0] = pred_bboxes[i, :, 0].clip(
+ min=0, max=img_shape[i, 1])
+ pred_bboxes[i, :, 1] = pred_bboxes[i, :, 1].clip(
+ min=0, max=img_shape[i, 0])
+ pred_bboxes[i, :, 2] = pred_bboxes[i, :, 2].clip(
+ min=0, max=img_shape[i, 1])
+ pred_bboxes[i, :, 3] = pred_bboxes[i, :, 3].clip(
+ min=0, max=img_shape[i, 0])
+ # scale bbox to origin
+ scale_factor = scale_factor.flip([1]).tile([1, 2]).unsqueeze(1)
+ pred_bboxes /= scale_factor
+ bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)
+ return bbox_pred, bbox_num
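+
+
+# Illustrative note (not part of the original file): in post_process the
+# incoming scale_factor is assumed to be [N, 2] in (h_scale, w_scale)
+# order; flip([1]) turns it into (w, h) and tile([1, 2]) into
+# (w, h, w, h), so it divides the (x1, y1, x2, y2) boxes elementwise.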
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/ttf_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/ttf_head.py
new file mode 100644
index 000000000..dfe97bdb7
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/ttf_head.py
@@ -0,0 +1,311 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import Constant, Normal
+from paddle.regularizer import L2Decay
+from ppdet.core.workspace import register
+from ppdet.modeling.layers import DeformableConvV2, LiteConv
+import numpy as np
+
+
+@register
+class HMHead(nn.Layer):
+ """
+ Args:
+ ch_in (int): The channel number of input Tensor.
+ ch_out (int): The channel number of output Tensor.
+ num_classes (int): Number of classes.
+ conv_num (int): The convolution number of hm_feat.
+ dcn_head (bool): whether to use DCN in the head. False by default.
+ lite_head (bool): whether to use the lite version. False by default.
+ norm_type (str): norm type; 'sync_bn', 'bn' and 'gn' are optional,
+ 'bn' by default.
+
+ Return:
+ Heatmap head output
+ """
+ __shared__ = ['num_classes', 'norm_type']
+
+ def __init__(
+ self,
+ ch_in,
+ ch_out=128,
+ num_classes=80,
+ conv_num=2,
+ dcn_head=False,
+ lite_head=False,
+ norm_type='bn', ):
+ super(HMHead, self).__init__()
+ head_conv = nn.Sequential()
+ for i in range(conv_num):
+ name = 'conv.{}'.format(i)
+ if lite_head:
+ lite_name = 'hm.' + name
+ head_conv.add_sublayer(
+ lite_name,
+ LiteConv(
+ in_channels=ch_in if i == 0 else ch_out,
+ out_channels=ch_out,
+ norm_type=norm_type))
+ else:
+ if dcn_head:
+ head_conv.add_sublayer(
+ name,
+ DeformableConvV2(
+ in_channels=ch_in if i == 0 else ch_out,
+ out_channels=ch_out,
+ kernel_size=3,
+ weight_attr=ParamAttr(initializer=Normal(0, 0.01))))
+ else:
+ head_conv.add_sublayer(
+ name,
+ nn.Conv2D(
+ in_channels=ch_in if i == 0 else ch_out,
+ out_channels=ch_out,
+ kernel_size=3,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
+ bias_attr=ParamAttr(
+ learning_rate=2., regularizer=L2Decay(0.))))
+ head_conv.add_sublayer(name + '.act', nn.ReLU())
+ self.feat = head_conv
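+ # focal-loss prior: bias_init = -log((1 - 0.01) / 0.01) ~= -4.595, so the
+ # initial sigmoid classification score starts out near 0.01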
+ bias_init = float(-np.log((1 - 0.01) / 0.01))
+ weight_attr = None if lite_head else ParamAttr(initializer=Normal(0,
+ 0.01))
+ self.head = nn.Conv2D(
+ in_channels=ch_out,
+ out_channels=num_classes,
+ kernel_size=1,
+ weight_attr=weight_attr,
+ bias_attr=ParamAttr(
+ learning_rate=2.,
+ regularizer=L2Decay(0.),
+ initializer=Constant(bias_init)))
+
+ def forward(self, feat):
+ out = self.feat(feat)
+ out = self.head(out)
+ return out
+
+
+@register
+class WHHead(nn.Layer):
+ """
+ Args:
+ ch_in (int): The channel number of input Tensor.
+ ch_out (int): The channel number of output Tensor.
+ conv_num (int): The convolution number of wh_feat.
+ dcn_head (bool): whether to use DCN in the head. False by default.
+ lite_head (bool): whether to use the lite version. False by default.
+ norm_type (str): norm type; 'sync_bn', 'bn' and 'gn' are optional,
+ 'bn' by default.
+ Return:
+ Width & Height head output
+ """
+ __shared__ = ['norm_type']
+
+ def __init__(self,
+ ch_in,
+ ch_out=64,
+ conv_num=2,
+ dcn_head=False,
+ lite_head=False,
+ norm_type='bn'):
+ super(WHHead, self).__init__()
+ head_conv = nn.Sequential()
+ for i in range(conv_num):
+ name = 'conv.{}'.format(i)
+ if lite_head:
+ lite_name = 'wh.' + name
+ head_conv.add_sublayer(
+ lite_name,
+ LiteConv(
+ in_channels=ch_in if i == 0 else ch_out,
+ out_channels=ch_out,
+ norm_type=norm_type))
+ else:
+ if dcn_head:
+ head_conv.add_sublayer(
+ name,
+ DeformableConvV2(
+ in_channels=ch_in if i == 0 else ch_out,
+ out_channels=ch_out,
+ kernel_size=3,
+ weight_attr=ParamAttr(initializer=Normal(0, 0.01))))
+ else:
+ head_conv.add_sublayer(
+ name,
+ nn.Conv2D(
+ in_channels=ch_in if i == 0 else ch_out,
+ out_channels=ch_out,
+ kernel_size=3,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
+ bias_attr=ParamAttr(
+ learning_rate=2., regularizer=L2Decay(0.))))
+ head_conv.add_sublayer(name + '.act', nn.ReLU())
+
+ weight_attr = None if lite_head else ParamAttr(initializer=Normal(0,
+ 0.01))
+ self.feat = head_conv
+ self.head = nn.Conv2D(
+ in_channels=ch_out,
+ out_channels=4,
+ kernel_size=1,
+ weight_attr=weight_attr,
+ bias_attr=ParamAttr(
+ learning_rate=2., regularizer=L2Decay(0.)))
+
+ def forward(self, feat):
+ out = self.feat(feat)
+ out = self.head(out)
+ out = F.relu(out)
+ return out
+
+
+@register
+class TTFHead(nn.Layer):
+ """
+ TTFHead
+ Args:
+ in_channels (int): the channel number of input to TTFHead.
+ num_classes (int): the number of classes, 80 by default.
+ hm_head_planes (int): the channel number in heatmap head,
+ 128 by default.
+ wh_head_planes (int): the channel number in width & height head,
+ 64 by default.
+ hm_head_conv_num (int): the number of convolution in heatmap head,
+ 2 by default.
+ wh_head_conv_num (int): the number of convolution in width & height
+ head, 2 by default.
+ hm_loss (object): Instance of 'CTFocalLoss'.
+ wh_loss (object): Instance of 'GIoULoss'.
+ wh_offset_base (float): the base offset of width and height,
+ 16.0 by default.
+ down_ratio (int): the actual down_ratio is calculated by base_down_ratio
+ (default 16) and the number of upsample layers.
+ lite_head (bool): whether to use the lite version. False by default.
+ norm_type (str): norm type; 'sync_bn', 'bn' and 'gn' are optional,
+ 'bn' by default.
+ ags_module (bool): whether to use the AGS module to reweight the
+ location feature. False by default.
+
+ """
+
+ __shared__ = ['num_classes', 'down_ratio', 'norm_type']
+ __inject__ = ['hm_loss', 'wh_loss']
+
+ def __init__(self,
+ in_channels,
+ num_classes=80,
+ hm_head_planes=128,
+ wh_head_planes=64,
+ hm_head_conv_num=2,
+ wh_head_conv_num=2,
+ hm_loss='CTFocalLoss',
+ wh_loss='GIoULoss',
+ wh_offset_base=16.,
+ down_ratio=4,
+ dcn_head=False,
+ lite_head=False,
+ norm_type='bn',
+ ags_module=False):
+ super(TTFHead, self).__init__()
+ self.in_channels = in_channels
+ self.hm_head = HMHead(in_channels, hm_head_planes, num_classes,
+ hm_head_conv_num, dcn_head, lite_head, norm_type)
+ self.wh_head = WHHead(in_channels, wh_head_planes, wh_head_conv_num,
+ dcn_head, lite_head, norm_type)
+ self.hm_loss = hm_loss
+ self.wh_loss = wh_loss
+
+ self.wh_offset_base = wh_offset_base
+ self.down_ratio = down_ratio
+ self.ags_module = ags_module
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ if isinstance(input_shape, (list, tuple)):
+ input_shape = input_shape[0]
+ return {'in_channels': input_shape.channels, }
+
+ def forward(self, feats):
+ hm = self.hm_head(feats)
+ wh = self.wh_head(feats) * self.wh_offset_base
+ return hm, wh
+
+ def filter_box_by_weight(self, pred, target, weight):
+ """
+ Filter out boxes whose ttf_reg_weight is 0; keep only positive samples.
+ """
+ index = paddle.nonzero(weight > 0)
+ index.stop_gradient = True
+ weight = paddle.gather_nd(weight, index)
+ pred = paddle.gather_nd(pred, index)
+ target = paddle.gather_nd(target, index)
+ return pred, target, weight
+
+ def filter_loc_by_weight(self, score, weight):
+ index = paddle.nonzero(weight > 0)
+ index.stop_gradient = True
+ score = paddle.gather_nd(score, index)
+ return score
+
+ def get_loss(self, pred_hm, pred_wh, target_hm, box_target, target_weight):
+ pred_hm = paddle.clip(F.sigmoid(pred_hm), 1e-4, 1 - 1e-4)
+ hm_loss = self.hm_loss(pred_hm, target_hm)
+ H, W = target_hm.shape[2:]
+ mask = paddle.reshape(target_weight, [-1, H, W])
+ avg_factor = paddle.sum(mask) + 1e-4
+
+ base_step = self.down_ratio
+ shifts_x = paddle.arange(0, W * base_step, base_step, dtype='int32')
+ shifts_y = paddle.arange(0, H * base_step, base_step, dtype='int32')
+ shift_y, shift_x = paddle.tensor.meshgrid([shifts_y, shifts_x])
+ base_loc = paddle.stack([shift_x, shift_y], axis=0)
+ base_loc.stop_gradient = True
+
+ pred_boxes = paddle.concat(
+ [0 - pred_wh[:, 0:2, :, :] + base_loc, pred_wh[:, 2:4] + base_loc],
+ axis=1)
+ pred_boxes = paddle.transpose(pred_boxes, [0, 2, 3, 1])
+ boxes = paddle.transpose(box_target, [0, 2, 3, 1])
+ boxes.stop_gradient = True
+
+ if self.ags_module:
+ pred_hm_max = paddle.max(pred_hm, axis=1, keepdim=True)
+ pred_hm_max_softmax = F.softmax(pred_hm_max, axis=1)
+ pred_hm_max_softmax = paddle.transpose(pred_hm_max_softmax,
+ [0, 2, 3, 1])
+ pred_hm_max_softmax = self.filter_loc_by_weight(pred_hm_max_softmax,
+ mask)
+ else:
+ pred_hm_max_softmax = None
+
+ pred_boxes, boxes, mask = self.filter_box_by_weight(pred_boxes, boxes,
+ mask)
+ mask.stop_gradient = True
+ wh_loss = self.wh_loss(
+ pred_boxes,
+ boxes,
+ iou_weight=mask.unsqueeze(1),
+ loc_reweight=pred_hm_max_softmax)
+ wh_loss = wh_loss / avg_factor
+
+ ttf_loss = {'hm_loss': hm_loss, 'wh_loss': wh_loss}
+ return ttf_loss
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/yolo_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/yolo_head.py
new file mode 100644
index 000000000..7b4e9bc33
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/heads/yolo_head.py
@@ -0,0 +1,124 @@
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.regularizer import L2Decay
+from ppdet.core.workspace import register
+
+
+def _de_sigmoid(x, eps=1e-7):
+ x = paddle.clip(x, eps, 1. / eps)
+ x = paddle.clip(1. / x - 1., eps, 1. / eps)
+ x = -paddle.log(x)
+ return x
+
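+# Illustrative sanity check (not part of the head): _de_sigmoid is the
+# numerically clipped inverse of F.sigmoid, i.e. the logit function, so a
+# probability survives a round trip:
+#
+#     p = paddle.to_tensor([0.1, 0.5, 0.9])
+#     assert paddle.allclose(F.sigmoid(_de_sigmoid(p)), p, atol=1e-5)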
+
+@register
+class YOLOv3Head(nn.Layer):
+ __shared__ = ['num_classes', 'data_format']
+ __inject__ = ['loss']
+
+ def __init__(self,
+ in_channels=[1024, 512, 256],
+ anchors=[[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
+ [59, 119], [116, 90], [156, 198], [373, 326]],
+ anchor_masks=[[6, 7, 8], [3, 4, 5], [0, 1, 2]],
+ num_classes=80,
+ loss='YOLOv3Loss',
+ iou_aware=False,
+ iou_aware_factor=0.4,
+ data_format='NCHW'):
+ """
+ Head for YOLOv3 network
+
+ Args:
+ num_classes (int): number of foreground classes
+ anchors (list): anchors
+ anchor_masks (list): anchor masks
+ loss (object): YOLOv3Loss instance
+ iou_aware (bool): whether to use iou_aware
+ iou_aware_factor (float): iou aware factor
+ data_format (str): data format, NCHW or NHWC
+ """
+ super(YOLOv3Head, self).__init__()
+ assert len(in_channels) > 0, "in_channels length should be > 0"
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.loss = loss
+
+ self.iou_aware = iou_aware
+ self.iou_aware_factor = iou_aware_factor
+
+ self.parse_anchor(anchors, anchor_masks)
+ self.num_outputs = len(self.anchors)
+ self.data_format = data_format
+
+ self.yolo_outputs = []
+ for i in range(len(self.anchors)):
+
+ if self.iou_aware:
+ num_filters = len(self.anchors[i]) * (self.num_classes + 6)
+ else:
+ num_filters = len(self.anchors[i]) * (self.num_classes + 5)
+ name = 'yolo_output.{}'.format(i)
+ conv = nn.Conv2D(
+ in_channels=self.in_channels[i],
+ out_channels=num_filters,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ data_format=data_format,
+ bias_attr=ParamAttr(regularizer=L2Decay(0.)))
+ conv.skip_quant = True
+ yolo_output = self.add_sublayer(name, conv)
+ self.yolo_outputs.append(yolo_output)
+
+ def parse_anchor(self, anchors, anchor_masks):
+ self.anchors = [[anchors[i] for i in mask] for mask in anchor_masks]
+ self.mask_anchors = []
+ anchor_num = len(anchors)
+ for masks in anchor_masks:
+ self.mask_anchors.append([])
+ for mask in masks:
+ assert mask < anchor_num, "anchor mask index overflow"
+ self.mask_anchors[-1].extend(anchors[mask])
+
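+ # Worked example with the defaults above (illustrative): anchor_masks
+ # [6, 7, 8] selects the three largest anchors for the coarsest map, so
+ #   self.anchors[0]      == [[116, 90], [156, 198], [373, 326]]
+ #   self.mask_anchors[0] == [116, 90, 156, 198, 373, 326]  # flattened
+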
+ def forward(self, feats, targets=None):
+ assert len(feats) == len(self.anchors)
+ yolo_outputs = []
+ for i, feat in enumerate(feats):
+ yolo_output = self.yolo_outputs[i](feat)
+ if self.data_format == 'NHWC':
+ yolo_output = paddle.transpose(yolo_output, [0, 3, 1, 2])
+ yolo_outputs.append(yolo_output)
+
+ if self.training:
+ return self.loss(yolo_outputs, targets, self.anchors)
+ else:
+ if self.iou_aware:
+ y = []
+ for i, out in enumerate(yolo_outputs):
+ na = len(self.anchors[i])
+ ioup, x = out[:, 0:na, :, :], out[:, na:, :, :]
+ b, c, h, w = x.shape
+ no = c // na
+ x = x.reshape((b, na, no, h * w))
+ ioup = ioup.reshape((b, na, 1, h * w))
+ obj = x[:, :, 4:5, :]
+ ioup = F.sigmoid(ioup)
+ obj = F.sigmoid(obj)
+ obj_t = (obj**(1 - self.iou_aware_factor)) * (
+ ioup**self.iou_aware_factor)
+ obj_t = _de_sigmoid(obj_t)
+ loc_t = x[:, :, :4, :]
+ cls_t = x[:, :, 5:, :]
+ y_t = paddle.concat([loc_t, obj_t, cls_t], axis=2)
+ y_t = y_t.reshape((b, c, h, w))
+ y.append(y_t)
+ return y
+ else:
+ return yolo_outputs
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape], }
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/initializer.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/initializer.py
new file mode 100644
index 000000000..b7a135dcc
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/initializer.py
@@ -0,0 +1,317 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
+The pytorch/pytorch code is released under a BSD-style license, as found in its LICENSE file.
+"""
+
+import math
+import numpy as np
+
+import paddle
+import paddle.nn as nn
+
+__all__ = [
+ 'uniform_',
+ 'normal_',
+ 'constant_',
+ 'ones_',
+ 'zeros_',
+ 'xavier_uniform_',
+ 'xavier_normal_',
+ 'kaiming_uniform_',
+ 'kaiming_normal_',
+ 'linear_init_',
+ 'conv_init_',
+ 'reset_initialized_parameter',
+]
+
+
+def _no_grad_uniform_(tensor, a, b):
+ with paddle.no_grad():
+ tensor.set_value(
+ paddle.uniform(
+ shape=tensor.shape, dtype=tensor.dtype, min=a, max=b))
+ return tensor
+
+
+def _no_grad_normal_(tensor, mean=0., std=1.):
+ with paddle.no_grad():
+ tensor.set_value(paddle.normal(mean=mean, std=std, shape=tensor.shape))
+ return tensor
+
+
+def _no_grad_fill_(tensor, value=0.):
+ with paddle.no_grad():
+ tensor.set_value(paddle.full_like(tensor, value, dtype=tensor.dtype))
+ return tensor
+
+
+def uniform_(tensor, a, b):
+ """
+ Modify tensor in-place using uniform_
+ Args:
+ tensor (paddle.Tensor): paddle Tensor
+ a (float|int): min value.
+ b (float|int): max value.
+ Return:
+ tensor
+ """
+ return _no_grad_uniform_(tensor, a, b)
+
+
+def normal_(tensor, mean=0., std=1.):
+ """
+ Modify tensor in-place using normal_
+ Args:
+ tensor (paddle.Tensor): paddle Tensor
+ mean (float|int): mean value.
+ std (float|int): std value.
+ Return:
+ tensor
+ """
+ return _no_grad_normal_(tensor, mean, std)
+
+
+def constant_(tensor, value=0.):
+ """
+ Modify tensor in-place using constant_
+ Args:
+ tensor (paddle.Tensor): paddle Tensor
+ value (float|int): value to fill tensor.
+ Return:
+ tensor
+ """
+ return _no_grad_fill_(tensor, value)
+
+
+def ones_(tensor):
+ """
+ Modify tensor in-place using ones_
+ Args:
+ tensor (paddle.Tensor): paddle Tensor
+ Return:
+ tensor
+ """
+ return _no_grad_fill_(tensor, 1)
+
+
+def zeros_(tensor):
+ """
+ Modify tensor in-place using zeros_
+ Args:
+ tensor (paddle.Tensor): paddle Tensor
+ Return:
+ tensor
+ """
+ return _no_grad_fill_(tensor, 0)
+
+
+def _calculate_fan_in_and_fan_out(tensor, reverse=False):
+ """
+ Calculate (fan_in, fan_out) for tensor
+
+ Args:
+ tensor (Tensor): paddle.Tensor
+ reverse (bool): tensor data format order, False by default as [fout, fin, ...]; e.g. conv.weight [cout, cin, kh, kw] is False, linear.weight [cin, cout] is True
+
+ Return:
+ Tuple[fan_in, fan_out]
+ """
+ if tensor.ndim < 2:
+ raise ValueError(
+ "Fan in and fan out can not be computed for tensor with fewer than 2 dimensions"
+ )
+
+ if reverse:
+ num_input_fmaps, num_output_fmaps = tensor.shape[0], tensor.shape[1]
+ else:
+ num_input_fmaps, num_output_fmaps = tensor.shape[1], tensor.shape[0]
+
+ receptive_field_size = 1
+ if tensor.ndim > 2:
+ receptive_field_size = np.prod(tensor.shape[2:])
+
+ fan_in = num_input_fmaps * receptive_field_size
+ fan_out = num_output_fmaps * receptive_field_size
+
+ return fan_in, fan_out
+
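+# For example (illustrative): a conv weight of shape [cout, cin, kh, kw] =
+# [64, 32, 3, 3] with reverse=False gives fan_in = 32 * 9 = 288 and
+# fan_out = 64 * 9 = 576.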
+
+def xavier_uniform_(tensor, gain=1., reverse=False):
+ """
+ Modify tensor in-place using xavier_uniform_
+ Args:
+ tensor (paddle.Tensor): paddle Tensor
+ gain (float): scaling factor, 1. by default.
+ reverse (bool): tensor data format order, False by default as [fout, fin, ...].
+ Return:
+ tensor
+ """
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse)
+ std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
+ k = math.sqrt(3.0) * std
+ return _no_grad_uniform_(tensor, -k, k)
+
+
+def xavier_normal_(tensor, gain=1., reverse=False):
+ """
+ Modify tensor in-place using xavier_normal_
+ Args:
+ tensor (paddle.Tensor): paddle Tensor
+ gain (float): scaling factor, 1. by default.
+ reverse (bool): tensor data format order, False by default as [fout, fin, ...].
+ Return:
+ tensor
+ """
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse)
+ std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
+ return _no_grad_normal_(tensor, 0, std)
+
+
+# reference: https://pytorch.org/docs/stable/_modules/torch/nn/init.html
+def _calculate_correct_fan(tensor, mode, reverse=False):
+ mode = mode.lower()
+ valid_modes = ['fan_in', 'fan_out']
+ if mode not in valid_modes:
+ raise ValueError("Mode {} not supported, please use one of {}".format(
+ mode, valid_modes))
+
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse)
+
+ return fan_in if mode == 'fan_in' else fan_out
+
+
+def _calculate_gain(nonlinearity, param=None):
+ linear_fns = [
+ 'linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d',
+ 'conv_transpose2d', 'conv_transpose3d'
+ ]
+ if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
+ return 1
+ elif nonlinearity == 'tanh':
+ return 5.0 / 3
+ elif nonlinearity == 'relu':
+ return math.sqrt(2.0)
+ elif nonlinearity == 'leaky_relu':
+ if param is None:
+ negative_slope = 0.01
+ elif not isinstance(param, bool) and isinstance(
+ param, int) or isinstance(param, float):
+ # True/False are instances of int, hence check above
+ negative_slope = param
+ else:
+ raise ValueError("negative_slope {} not a valid number".format(
+ param))
+ return math.sqrt(2.0 / (1 + negative_slope**2))
+ elif nonlinearity == 'selu':
+ return 3.0 / 4
+ else:
+ raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
+
+
+def kaiming_uniform_(tensor,
+ a=0,
+ mode='fan_in',
+ nonlinearity='leaky_relu',
+ reverse=False):
+ """
+ Modify tensor in-place using the kaiming_uniform method
+ Args:
+ tensor (paddle.Tensor): paddle Tensor
+ mode (str): ['fan_in', 'fan_out'], 'fan_in' by default
+ nonlinearity (str): nonlinearity method name
+ reverse (bool): tensor data format order, False by default as [fout, fin, ...].
+ Return:
+ tensor
+ """
+ fan = _calculate_correct_fan(tensor, mode, reverse)
+ gain = _calculate_gain(nonlinearity, a)
+ std = gain / math.sqrt(fan)
+ k = math.sqrt(3.0) * std
+ return _no_grad_uniform_(tensor, -k, k)
+
+
+def kaiming_normal_(tensor,
+ a=0,
+ mode='fan_in',
+ nonlinearity='leaky_relu',
+ reverse=False):
+ """
+ Modify tensor in-place using kaiming_normal_
+ Args:
+ tensor (paddle.Tensor): paddle Tensor
+ mode (str): ['fan_in', 'fan_out'], 'fan_in' by default
+ nonlinearity (str): nonlinearity method name
+ reverse (bool): tensor data format order, False by default as [fout, fin, ...].
+ Return:
+ tensor
+ """
+ fan = _calculate_correct_fan(tensor, mode, reverse)
+ gain = _calculate_gain(nonlinearity, a)
+ std = gain / math.sqrt(fan)
+ return _no_grad_normal_(tensor, 0, std)
+
+
+def linear_init_(module):
+ bound = 1 / math.sqrt(module.weight.shape[0])
+ uniform_(module.weight, -bound, bound)
+ uniform_(module.bias, -bound, bound)
+
+
+def conv_init_(module):
+ bound = 1 / np.sqrt(np.prod(module.weight.shape[1:]))
+ uniform_(module.weight, -bound, bound)
+ uniform_(module.bias, -bound, bound)
+
+
+def bias_init_with_prob(prior_prob=0.01):
+ """initialize conv/fc bias value according to a given probability value."""
+ bias_init = float(-np.log((1 - prior_prob) / prior_prob))
+ return bias_init
+
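+# e.g. bias_init_with_prob(0.01) ~= -4.595, so sigmoid over the freshly
+# initialized logits predicts foreground with probability ~0.01 (the usual
+# focal-loss prior).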
+
+@paddle.no_grad()
+def reset_initialized_parameter(model, include_self=True):
+ """
+ Reset initialized parameters using the following methods for [conv, linear, embedding, bn]
+
+ Args:
+ model (paddle.Layer): paddle Layer
+ include_self (bool: True): include_self argument for the Layer.named_sublayers method; indicates whether the model itself is included
+ Return:
+ None
+ """
+ for _, m in model.named_sublayers(include_self=include_self):
+ if isinstance(m, nn.Conv2D):
+ k = float(m._groups) / (m._in_channels * m._kernel_size[0] *
+ m._kernel_size[1])
+ k = math.sqrt(k)
+ _no_grad_uniform_(m.weight, -k, k)
+ if hasattr(m, 'bias') and getattr(m, 'bias') is not None:
+ _no_grad_uniform_(m.bias, -k, k)
+
+ elif isinstance(m, nn.Linear):
+ k = math.sqrt(1. / m.weight.shape[0])
+ _no_grad_uniform_(m.weight, -k, k)
+ if hasattr(m, 'bias') and getattr(m, 'bias') is not None:
+ _no_grad_uniform_(m.bias, -k, k)
+
+ elif isinstance(m, nn.Embedding):
+ _no_grad_normal_(m.weight, mean=0., std=1.)
+
+ elif isinstance(m, (nn.BatchNorm2D, nn.LayerNorm)):
+ _no_grad_fill_(m.weight, 1.)
+ if hasattr(m, 'bias') and getattr(m, 'bias') is not None:
+ _no_grad_fill_(m.bias, 0)
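+
+
+# Usage sketch (hypothetical toy model; any paddle.nn.Layer works):
+#
+#     model = nn.Sequential(nn.Conv2D(3, 16, 3), nn.BatchNorm2D(16))
+#     reset_initialized_parameter(model)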
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/keypoint_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/keypoint_utils.py
new file mode 100644
index 000000000..b3f84da7d
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/keypoint_utils.py
@@ -0,0 +1,336 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cv2
+import numpy as np
+
+
+def get_affine_mat_kernel(h, w, s, inv=False):
+ if w < h:
+ w_ = s
+ h_ = int(np.ceil((s / w * h) / 64.) * 64)
+ scale_w = w
+ scale_h = h_ / w_ * w
+
+ else:
+ h_ = s
+ w_ = int(np.ceil((s / h * w) / 64.) * 64)
+ scale_h = h
+ scale_w = w_ / h_ * h
+
+ center = np.array([np.round(w / 2.), np.round(h / 2.)])
+
+ size_resized = (w_, h_)
+ trans = get_affine_transform(
+ center, np.array([scale_w, scale_h]), 0, size_resized, inv=inv)
+
+ return trans, size_resized
+
+
+def get_affine_transform(center,
+ input_size,
+ rot,
+ output_size,
+ shift=(0., 0.),
+ inv=False):
+ """Get the affine transform matrix, given the center/scale/rot/output_size.
+
+ Args:
+ center (np.ndarray[2, ]): Center of the bounding box (x, y).
+ input_size (np.ndarray[2, ]): Size of input feature (width, height).
+ rot (float): Rotation angle (degree).
+ output_size (np.ndarray[2, ]): Size of the destination heatmaps.
+ shift (0-100%): Shift translation ratio wrt the width/height.
+ Default (0., 0.).
+ inv (bool): Option to inverse the affine transform direction.
+ (inv=False: src->dst or inv=True: dst->src)
+
+ Returns:
+ np.ndarray: The transform matrix.
+ """
+ assert len(center) == 2
+ assert len(output_size) == 2
+ assert len(shift) == 2
+
+ if not isinstance(input_size, (np.ndarray, list)):
+ input_size = np.array([input_size, input_size], dtype=np.float32)
+ scale_tmp = input_size
+
+ shift = np.array(shift)
+ src_w = scale_tmp[0]
+ dst_w = output_size[0]
+ dst_h = output_size[1]
+
+ rot_rad = np.pi * rot / 180
+ src_dir = rotate_point([0., src_w * -0.5], rot_rad)
+ dst_dir = np.array([0., dst_w * -0.5])
+
+ src = np.zeros((3, 2), dtype=np.float32)
+
+ src[0, :] = center + scale_tmp * shift
+ src[1, :] = center + src_dir + scale_tmp * shift
+ src[2, :] = _get_3rd_point(src[0, :], src[1, :])
+
+ dst = np.zeros((3, 2), dtype=np.float32)
+ dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
+ dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
+ dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
+
+ if inv:
+ trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
+ else:
+ trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
+
+ return trans
+
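+# Usage sketch (illustrative values; `img` is a hypothetical HxWx3 array):
+# map a region centered at (100, 100) with input size 200 onto a 64x64 patch.
+#
+#     trans = get_affine_transform(np.array([100., 100.]), 200, 0, (64, 64))
+#     patch = cv2.warpAffine(img, trans, (64, 64))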
+
+def get_warp_matrix(theta, size_input, size_dst, size_target):
+ """Calculate the transformation matrix under the constraint of unbiased.
+ Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
+ Data Processing for Human Pose Estimation (CVPR 2020).
+
+ Args:
+ theta (float): Rotation angle in degrees.
+ size_input (np.ndarray): Size of input image [w, h].
+ size_dst (np.ndarray): Size of output image [w, h].
+ size_target (np.ndarray): Size of ROI in input plane [w, h].
+
+ Returns:
+ matrix (np.ndarray): A matrix for transformation.
+ """
+ theta = np.deg2rad(theta)
+ matrix = np.zeros((2, 3), dtype=np.float32)
+ scale_x = size_dst[0] / size_target[0]
+ scale_y = size_dst[1] / size_target[1]
+ matrix[0, 0] = np.cos(theta) * scale_x
+ matrix[0, 1] = -np.sin(theta) * scale_x
+ matrix[0, 2] = scale_x * (
+ -0.5 * size_input[0] * np.cos(theta) + 0.5 * size_input[1] *
+ np.sin(theta) + 0.5 * size_target[0])
+ matrix[1, 0] = np.sin(theta) * scale_y
+ matrix[1, 1] = np.cos(theta) * scale_y
+ matrix[1, 2] = scale_y * (
+ -0.5 * size_input[0] * np.sin(theta) - 0.5 * size_input[1] *
+ np.cos(theta) + 0.5 * size_target[1])
+ return matrix
+
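+# The resulting 2x3 matrix plugs into cv2.warpAffine just like the one from
+# get_affine_transform, e.g. (illustrative sizes; `img` is hypothetical):
+#
+#     mat = get_warp_matrix(0., np.array([192, 256]), np.array([48, 64]),
+#                           np.array([192, 256]))
+#     heat = cv2.warpAffine(img, mat, (48, 64))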
+
+def _get_3rd_point(a, b):
+ """To calculate the affine matrix, three pairs of points are required. This
+ function is used to get the 3rd point, given 2D points a & b.
+
+ The 3rd point is defined by rotating vector `a - b` by 90 degrees
+ anticlockwise, using b as the rotation center.
+
+ Args:
+ a (np.ndarray): point(x,y)
+ b (np.ndarray): point(x,y)
+
+ Returns:
+ np.ndarray: The 3rd point.
+ """
+ assert len(
+ a) == 2, 'input of _get_3rd_point should be point with length of 2'
+ assert len(
+ b) == 2, 'input of _get_3rd_point should be point with length of 2'
+ direction = a - b
+ third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)
+
+ return third_pt
+
+
+def rotate_point(pt, angle_rad):
+ """Rotate a point by an angle.
+
+ Args:
+ pt (list[float]): 2 dimensional point to be rotated
+ angle_rad (float): rotation angle by radian
+
+ Returns:
+ list[float]: Rotated point.
+ """
+ assert len(pt) == 2
+ sn, cs = np.sin(angle_rad), np.cos(angle_rad)
+ new_x = pt[0] * cs - pt[1] * sn
+ new_y = pt[0] * sn + pt[1] * cs
+ rotated_pt = [new_x, new_y]
+
+ return rotated_pt
+
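+# e.g. rotate_point([1., 0.], np.pi / 2) ~= [0., 1.]: a quarter turn
+# anticlockwise about the origin.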
+
+def transpred(kpts, h, w, s):
+ trans, _ = get_affine_mat_kernel(h, w, s, inv=True)
+
+ return warp_affine_joints(kpts[..., :2].copy(), trans)
+
+
+def warp_affine_joints(joints, mat):
+ """Apply affine transformation defined by the transform matrix on the
+ joints.
+
+ Args:
+ joints (np.ndarray[..., 2]): Origin coordinate of joints.
+ mat (np.ndarray[2, 3]): The affine matrix.
+
+ Returns:
+ matrix (np.ndarray[..., 2]): Result coordinate of joints.
+ """
+ joints = np.array(joints)
+ shape = joints.shape
+ joints = joints.reshape(-1, 2)
+ return np.dot(np.concatenate(
+ (joints, joints[:, 0:1] * 0 + 1), axis=1),
+ mat.T).reshape(shape)
+
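+# warp_affine_joints appends a homogeneous 1 to each (x, y) pair and applies
+# the 2x3 matrix: [x', y'] = mat @ [x, y, 1]^T, broadcast over any leading
+# shape. Illustrative identity check:
+#
+#     eye = np.array([[1., 0., 0.], [0., 1., 0.]])
+#     pts = np.random.rand(17, 2)
+#     assert np.allclose(warp_affine_joints(pts, eye), pts)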
+
+def affine_transform(pt, t):
+ new_pt = np.array([pt[0], pt[1], 1.]).T
+ new_pt = np.dot(t, new_pt)
+ return new_pt[:2]
+
+
+def transform_preds(coords, center, scale, output_size):
+ target_coords = np.zeros(coords.shape)
+ trans = get_affine_transform(center, scale * 200, 0, output_size, inv=1)
+ for p in range(coords.shape[0]):
+ target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
+ return target_coords
+
+
+def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):
+ if not isinstance(sigmas, np.ndarray):
+ sigmas = np.array([
+ .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07,
+ .87, .87, .89, .89
+ ]) / 10.0
+ vars = (sigmas * 2)**2
+ xg = g[0::3]
+ yg = g[1::3]
+ vg = g[2::3]
+ ious = np.zeros((d.shape[0]))
+ for n_d in range(0, d.shape[0]):
+ xd = d[n_d, 0::3]
+ yd = d[n_d, 1::3]
+ vd = d[n_d, 2::3]
+ dx = xd - xg
+ dy = yd - yg
+ e = (dx**2 + dy**2) / vars / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2
+ if in_vis_thre is not None:
+ # element-wise visibility mask; the original `and` of two lists
+ # returned only the second list, ignoring vg
+ ind = np.logical_and(vg > in_vis_thre, vd > in_vis_thre)
+ e = e[ind]
+ ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0
+ return ious
+
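+# oks_iou implements the COCO object keypoint similarity: per keypoint k it
+# computes exp(-d_k**2 / (2 * s * (2 * sigma_k)**2)), where d_k is the L2
+# distance, s the mean of the two object areas and sigma_k the per-keypoint
+# falloff constant, then averages over the (optionally visible) keypoints.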
+
+def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):
+ """greedily select boxes with high confidence and overlap with current maximum <= thresh
+ rule out overlap >= thresh
+
+ Args:
+ kpts_db (list): The predicted keypoints within the image
+ thresh (float): The threshold to select the boxes
+ sigmas (np.array): The variance to calculate the oks iou
+ Default: None
+ in_vis_thre (float): The threshold to select the high confidence boxes
+ Default: None
+
+ Return:
+ keep (list): indexes to keep
+ """
+
+ if len(kpts_db) == 0:
+ return []
+
+ scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))])
+ kpts = np.array(
+ [kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))])
+ areas = np.array([kpts_db[i]['area'] for i in range(len(kpts_db))])
+
+ order = scores.argsort()[::-1]
+
+ keep = []
+ while order.size > 0:
+ i = order[0]
+ keep.append(i)
+
+ oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]],
+ sigmas, in_vis_thre)
+
+ inds = np.where(oks_ovr <= thresh)[0]
+ order = order[inds + 1]
+
+ return keep
+
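+# Usage sketch (kpts_db is hypothetical; each entry needs 'score',
+# 'keypoints' of shape [K, 3] and 'area'):
+#
+#     keep = oks_nms(kpts_db, thresh=0.9)
+#     kpts_db = [kpts_db[i] for i in keep]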
+
+def rescore(overlap, scores, thresh, type='gaussian'):
+ assert overlap.shape[0] == scores.shape[0]
+ if type == 'linear':
+ inds = np.where(overlap >= thresh)[0]
+ scores[inds] = scores[inds] * (1 - overlap[inds])
+ else:
+ scores = scores * np.exp(-overlap**2 / thresh)
+
+ return scores
+
+
+def soft_oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):
+ """greedily select boxes with high confidence and overlap with current maximum <= thresh
+ rule out overlap >= thresh
+
+ Args:
+ kpts_db (list): The predicted keypoints within the image
+ thresh (float): The threshold to select the boxes
+ sigmas (np.array): The variance to calculate the oks iou
+ Default: None
+ in_vis_thre (float): The threshold to select the high confidence boxes
+ Default: None
+
+ Return:
+ keep (list): indexes to keep
+ """
+
+ if len(kpts_db) == 0:
+ return []
+
+ scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))])
+ kpts = np.array(
+ [kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))])
+ areas = np.array([kpts_db[i]['area'] for i in range(len(kpts_db))])
+
+ order = scores.argsort()[::-1]
+ scores = scores[order]
+
+ # max_dets = order.size
+ max_dets = 20
+ keep = np.zeros(max_dets, dtype=np.intp)
+ keep_cnt = 0
+ while order.size > 0 and keep_cnt < max_dets:
+ i = order[0]
+
+ oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]],
+ sigmas, in_vis_thre)
+
+ order = order[1:]
+ scores = rescore(oks_ovr, scores[1:], thresh)
+
+ tmp = scores.argsort()[::-1]
+ order = order[tmp]
+ scores = scores[tmp]
+
+ keep[keep_cnt] = i
+ keep_cnt += 1
+
+ keep = keep[:keep_cnt]
+
+ return keep
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/layers.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/layers.py
new file mode 100644
index 000000000..73da16a14
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/layers.py
@@ -0,0 +1,1424 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import six
+import numpy as np
+from numbers import Integral
+
+import paddle
+import paddle.nn as nn
+from paddle import ParamAttr
+from paddle import to_tensor
+import paddle.nn.functional as F
+from paddle.nn.initializer import Normal, Constant, XavierUniform
+from paddle.regularizer import L2Decay
+# LayerHelper and NumpyArrayInitializer are used by AnchorGrid.__call__ below;
+# import them so that code path does not raise a NameError.
+from paddle.fluid.layer_helper import LayerHelper
+from paddle.fluid.initializer import NumpyArrayInitializer
+
+from ppdet.core.workspace import register, serializable
+from ppdet.modeling.bbox_utils import delta2bbox
+from . import ops
+from .initializer import xavier_uniform_, constant_
+
+from paddle.vision.ops import DeformConv2D
+
+
+def _to_list(l):
+ if isinstance(l, (list, tuple)):
+ return list(l)
+ return [l]
+
+
+class DeformableConvV2(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ weight_attr=None,
+ bias_attr=None,
+ lr_scale=1,
+ regularizer=None,
+ skip_quant=False,
+ dcn_bias_regularizer=L2Decay(0.),
+ dcn_bias_lr_scale=2.):
+ super(DeformableConvV2, self).__init__()
+ self.offset_channel = 2 * kernel_size**2
+ self.mask_channel = kernel_size**2
+
+ if lr_scale == 1 and regularizer is None:
+ offset_bias_attr = ParamAttr(initializer=Constant(0.))
+ else:
+ offset_bias_attr = ParamAttr(
+ initializer=Constant(0.),
+ learning_rate=lr_scale,
+ regularizer=regularizer)
+ self.conv_offset = nn.Conv2D(
+ in_channels,
+ 3 * kernel_size**2,
+ kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2,
+ weight_attr=ParamAttr(initializer=Constant(0.0)),
+ bias_attr=offset_bias_attr)
+ if skip_quant:
+ self.conv_offset.skip_quant = True
+
+ if bias_attr:
+ # in FCOS-DCN head, specifically need learning_rate and regularizer
+ dcn_bias_attr = ParamAttr(
+ initializer=Constant(value=0),
+ regularizer=dcn_bias_regularizer,
+ learning_rate=dcn_bias_lr_scale)
+ else:
+ # in ResNet backbone, do not need bias
+ dcn_bias_attr = False
+ self.conv_dcn = DeformConv2D(
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2 * dilation,
+ dilation=dilation,
+ groups=groups,
+ weight_attr=weight_attr,
+ bias_attr=dcn_bias_attr)
+
+ def forward(self, x):
+ offset_mask = self.conv_offset(x)
+ offset, mask = paddle.split(
+ offset_mask,
+ num_or_sections=[self.offset_channel, self.mask_channel],
+ axis=1)
+ mask = F.sigmoid(mask)
+ y = self.conv_dcn(x, offset, mask=mask)
+ return y
+
+
+class ConvNormLayer(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ filter_size,
+ stride,
+ groups=1,
+ norm_type='bn',
+ norm_decay=0.,
+ norm_groups=32,
+ use_dcn=False,
+ bias_on=False,
+ lr_scale=1.,
+ freeze_norm=False,
+ initializer=Normal(
+ mean=0., std=0.01),
+ skip_quant=False,
+ dcn_lr_scale=2.,
+ dcn_regularizer=L2Decay(0.)):
+ super(ConvNormLayer, self).__init__()
+ assert norm_type in ['bn', 'sync_bn', 'gn']
+
+ if bias_on:
+ bias_attr = ParamAttr(
+ initializer=Constant(value=0.), learning_rate=lr_scale)
+ else:
+ bias_attr = False
+
+ if not use_dcn:
+ self.conv = nn.Conv2D(
+ in_channels=ch_in,
+ out_channels=ch_out,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(
+ initializer=initializer, learning_rate=1.),
+ bias_attr=bias_attr)
+ if skip_quant:
+ self.conv.skip_quant = True
+ else:
+ # in FCOS-DCN head, specifically need learning_rate and regularizer
+ self.conv = DeformableConvV2(
+ in_channels=ch_in,
+ out_channels=ch_out,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(
+ initializer=initializer, learning_rate=1.),
+ bias_attr=True,
+ lr_scale=dcn_lr_scale,
+ regularizer=dcn_regularizer,
+ dcn_bias_regularizer=dcn_regularizer,
+ dcn_bias_lr_scale=dcn_lr_scale,
+ skip_quant=skip_quant)
+
+ norm_lr = 0. if freeze_norm else 1.
+ param_attr = ParamAttr(
+ learning_rate=norm_lr,
+ regularizer=L2Decay(norm_decay) if norm_decay is not None else None)
+ bias_attr = ParamAttr(
+ learning_rate=norm_lr,
+ regularizer=L2Decay(norm_decay) if norm_decay is not None else None)
+ if norm_type == 'bn':
+ self.norm = nn.BatchNorm2D(
+ ch_out, weight_attr=param_attr, bias_attr=bias_attr)
+ elif norm_type == 'sync_bn':
+ self.norm = nn.SyncBatchNorm(
+ ch_out, weight_attr=param_attr, bias_attr=bias_attr)
+ elif norm_type == 'gn':
+ self.norm = nn.GroupNorm(
+ num_groups=norm_groups,
+ num_channels=ch_out,
+ weight_attr=param_attr,
+ bias_attr=bias_attr)
+
+ def forward(self, inputs):
+ out = self.conv(inputs)
+ out = self.norm(out)
+ return out
+
+
+class LiteConv(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ stride=1,
+ with_act=True,
+ norm_type='sync_bn',
+ name=None):
+ super(LiteConv, self).__init__()
+ self.lite_conv = nn.Sequential()
+ conv1 = ConvNormLayer(
+ in_channels,
+ in_channels,
+ filter_size=5,
+ stride=stride,
+ groups=in_channels,
+ norm_type=norm_type,
+ initializer=XavierUniform())
+ conv2 = ConvNormLayer(
+ in_channels,
+ out_channels,
+ filter_size=1,
+ stride=stride,
+ norm_type=norm_type,
+ initializer=XavierUniform())
+ conv3 = ConvNormLayer(
+ out_channels,
+ out_channels,
+ filter_size=1,
+ stride=stride,
+ norm_type=norm_type,
+ initializer=XavierUniform())
+ conv4 = ConvNormLayer(
+ out_channels,
+ out_channels,
+ filter_size=5,
+ stride=stride,
+ groups=out_channels,
+ norm_type=norm_type,
+ initializer=XavierUniform())
+ conv_list = [conv1, conv2, conv3, conv4]
+ self.lite_conv.add_sublayer('conv1', conv1)
+ self.lite_conv.add_sublayer('relu6_1', nn.ReLU6())
+ self.lite_conv.add_sublayer('conv2', conv2)
+ if with_act:
+ self.lite_conv.add_sublayer('relu6_2', nn.ReLU6())
+ self.lite_conv.add_sublayer('conv3', conv3)
+ self.lite_conv.add_sublayer('relu6_3', nn.ReLU6())
+ self.lite_conv.add_sublayer('conv4', conv4)
+ if with_act:
+ self.lite_conv.add_sublayer('relu6_4', nn.ReLU6())
+
+ def forward(self, inputs):
+ out = self.lite_conv(inputs)
+ return out
+
+
+class DropBlock(nn.Layer):
+ def __init__(self, block_size, keep_prob, name, data_format='NCHW'):
+ """
+ DropBlock layer, see https://arxiv.org/abs/1810.12890
+
+ Args:
+ block_size (int): block size
+ keep_prob (float): keep probability
+ name (str): layer name
+ data_format (str): data format, NCHW or NHWC
+ """
+ super(DropBlock, self).__init__()
+ self.block_size = block_size
+ self.keep_prob = keep_prob
+ self.name = name
+ self.data_format = data_format
+
+ def forward(self, x):
+ if not self.training or self.keep_prob == 1:
+ return x
+ else:
+ gamma = (1. - self.keep_prob) / (self.block_size**2)
+ if self.data_format == 'NCHW':
+ shape = x.shape[2:]
+ else:
+ shape = x.shape[1:3]
+ for s in shape:
+ gamma *= s / (s - self.block_size + 1)
+
+ matrix = paddle.cast(paddle.rand(x.shape) < gamma, x.dtype)
+ mask_inv = F.max_pool2d(
+ matrix,
+ self.block_size,
+ stride=1,
+ padding=self.block_size // 2,
+ data_format=self.data_format)
+ mask = 1. - mask_inv
+ y = x * mask * (mask.numel() / mask.sum())
+ return y
+
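+# Usage sketch: each position seeds a dropped block with probability gamma,
+# chosen so that roughly (1 - keep_prob) of activations are zeroed overall;
+# in eval mode the layer is the identity.
+#
+#     drop = DropBlock(block_size=3, keep_prob=0.9, name='dropblock')
+#     y = drop(paddle.rand([2, 8, 32, 32]))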
+
+@register
+@serializable
+class AnchorGeneratorSSD(object):
+ def __init__(self,
+ steps=[8, 16, 32, 64, 100, 300],
+ aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
+ min_ratio=15,
+ max_ratio=90,
+ base_size=300,
+ min_sizes=[30.0, 60.0, 111.0, 162.0, 213.0, 264.0],
+ max_sizes=[60.0, 111.0, 162.0, 213.0, 264.0, 315.0],
+ offset=0.5,
+ flip=True,
+ clip=False,
+ min_max_aspect_ratios_order=False):
+ self.steps = steps
+ self.aspect_ratios = aspect_ratios
+ self.min_ratio = min_ratio
+ self.max_ratio = max_ratio
+ self.base_size = base_size
+ self.min_sizes = min_sizes
+ self.max_sizes = max_sizes
+ self.offset = offset
+ self.flip = flip
+ self.clip = clip
+ self.min_max_aspect_ratios_order = min_max_aspect_ratios_order
+
+ if self.min_sizes == [] and self.max_sizes == []:
+ num_layer = len(aspect_ratios)
+ step = int(
+ math.floor(((self.max_ratio - self.min_ratio)) / (num_layer - 2
+ )))
+ for ratio in six.moves.range(self.min_ratio, self.max_ratio + 1,
+ step):
+ self.min_sizes.append(self.base_size * ratio / 100.)
+ self.max_sizes.append(self.base_size * (ratio + step) / 100.)
+ self.min_sizes = [self.base_size * .10] + self.min_sizes
+ self.max_sizes = [self.base_size * .20] + self.max_sizes
+
+ self.num_priors = []
+ for aspect_ratio, min_size, max_size in zip(
+ aspect_ratios, self.min_sizes, self.max_sizes):
+ if isinstance(min_size, (list, tuple)):
+ self.num_priors.append(
+ len(_to_list(min_size)) + len(_to_list(max_size)))
+ else:
+ self.num_priors.append((len(aspect_ratio) * 2 + 1) * len(
+ _to_list(min_size)) + len(_to_list(max_size)))
+
+ def __call__(self, inputs, image):
+ boxes = []
+ for input, min_size, max_size, aspect_ratio, step in zip(
+ inputs, self.min_sizes, self.max_sizes, self.aspect_ratios,
+ self.steps):
+ box, _ = ops.prior_box(
+ input=input,
+ image=image,
+ min_sizes=_to_list(min_size),
+ max_sizes=_to_list(max_size),
+ aspect_ratios=aspect_ratio,
+ flip=self.flip,
+ clip=self.clip,
+ steps=[step, step],
+ offset=self.offset,
+ min_max_aspect_ratios_order=self.min_max_aspect_ratios_order)
+ boxes.append(paddle.reshape(box, [-1, 4]))
+ return boxes
+
+
+@register
+@serializable
+class RCNNBox(object):
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ prior_box_var=[10., 10., 5., 5.],
+ code_type="decode_center_size",
+ box_normalized=False,
+ num_classes=80):
+ super(RCNNBox, self).__init__()
+ self.prior_box_var = prior_box_var
+ self.code_type = code_type
+ self.box_normalized = box_normalized
+ self.num_classes = num_classes
+
+ def __call__(self, bbox_head_out, rois, im_shape, scale_factor):
+ bbox_pred = bbox_head_out[0]
+ cls_prob = bbox_head_out[1]
+ roi = rois[0]
+ rois_num = rois[1]
+
+ origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
+ scale_list = []
+ origin_shape_list = []
+
+ batch_size = 1
+ if isinstance(roi, list):
+ batch_size = len(roi)
+ else:
+ batch_size = paddle.slice(paddle.shape(im_shape), [0], [0], [1])
+ # bbox_pred.shape: [N, C*4]
+ for idx in range(batch_size):
+ roi_per_im = roi[idx]
+ rois_num_per_im = rois_num[idx]
+ expand_im_shape = paddle.expand(im_shape[idx, :],
+ [rois_num_per_im, 2])
+ origin_shape_list.append(expand_im_shape)
+
+ origin_shape = paddle.concat(origin_shape_list)
+
+ # bbox_pred.shape: [N, C*4]
+ # C=num_classes in faster/mask rcnn(bbox_head), C=1 in cascade rcnn(cascade_head)
+ bbox = paddle.concat(roi)
+ if bbox.shape[0] == 0:
+ bbox = paddle.zeros([0, bbox_pred.shape[1]], dtype='float32')
+ else:
+ bbox = delta2bbox(bbox_pred, bbox, self.prior_box_var)
+ scores = cls_prob[:, :-1]
+
+ # bbox.shape: [N, C, 4]
+ # bbox.shape[1] must be equal to scores.shape[1]
+ bbox_num_class = bbox.shape[1]
+ if bbox_num_class == 1:
+ bbox = paddle.tile(bbox, [1, self.num_classes, 1])
+
+ origin_h = paddle.unsqueeze(origin_shape[:, 0], axis=1)
+ origin_w = paddle.unsqueeze(origin_shape[:, 1], axis=1)
+ zeros = paddle.zeros_like(origin_h)
+ x1 = paddle.maximum(paddle.minimum(bbox[:, :, 0], origin_w), zeros)
+ y1 = paddle.maximum(paddle.minimum(bbox[:, :, 1], origin_h), zeros)
+ x2 = paddle.maximum(paddle.minimum(bbox[:, :, 2], origin_w), zeros)
+ y2 = paddle.maximum(paddle.minimum(bbox[:, :, 3], origin_h), zeros)
+ bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
+ bboxes = (bbox, rois_num)
+ return bboxes, scores
+
+
+@register
+@serializable
+class MultiClassNMS(object):
+ def __init__(self,
+ score_threshold=.05,
+ nms_top_k=-1,
+ keep_top_k=100,
+ nms_threshold=.5,
+ normalized=True,
+ nms_eta=1.0,
+ return_index=False,
+ return_rois_num=True):
+ super(MultiClassNMS, self).__init__()
+ self.score_threshold = score_threshold
+ self.nms_top_k = nms_top_k
+ self.keep_top_k = keep_top_k
+ self.nms_threshold = nms_threshold
+ self.normalized = normalized
+ self.nms_eta = nms_eta
+ self.return_index = return_index
+ self.return_rois_num = return_rois_num
+
+ def __call__(self, bboxes, score, background_label=-1):
+ """
+ bboxes (Tensor|List[Tensor]): 1. (Tensor) Predicted bboxes with shape
+ [N, M, 4], where N is the batch size and M is the number of bboxes.
+ 2. (List[Tensor]) bboxes and bbox_num, where bboxes have shape
+ [M, C, 4] (C is the class number) and bbox_num is the number of
+ bboxes of each batch, with shape [N,].
+ score (Tensor): Predicted scores with shape [N, C, M] or [M, C].
+ background_label (int): Ignore the background label; for example, RCNN
+ uses num_classes and YOLO uses -1.
+ """
+ kwargs = self.__dict__.copy()
+ if isinstance(bboxes, tuple):
+ bboxes, bbox_num = bboxes
+ kwargs.update({'rois_num': bbox_num})
+ if background_label > -1:
+ kwargs.update({'background_label': background_label})
+ return ops.multiclass_nms(bboxes, score, **kwargs)
+
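+# Usage sketch, mirroring how the detection heads in this repo call it
+# (pred_bboxes/pred_scores are hypothetical tensors shaped as documented):
+#
+#     nms = MultiClassNMS(score_threshold=0.05, keep_top_k=100)
+#     bbox_pred, bbox_num, _ = nms(pred_bboxes, pred_scores)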
+
+@register
+@serializable
+class MatrixNMS(object):
+ __append_doc__ = True
+
+ def __init__(self,
+ score_threshold=.05,
+ post_threshold=.05,
+ nms_top_k=-1,
+ keep_top_k=100,
+ use_gaussian=False,
+ gaussian_sigma=2.,
+ normalized=False,
+ background_label=0):
+ super(MatrixNMS, self).__init__()
+ self.score_threshold = score_threshold
+ self.post_threshold = post_threshold
+ self.nms_top_k = nms_top_k
+ self.keep_top_k = keep_top_k
+ self.normalized = normalized
+ self.use_gaussian = use_gaussian
+ self.gaussian_sigma = gaussian_sigma
+ self.background_label = background_label
+
+ def __call__(self, bbox, score, *args):
+ return ops.matrix_nms(
+ bboxes=bbox,
+ scores=score,
+ score_threshold=self.score_threshold,
+ post_threshold=self.post_threshold,
+ nms_top_k=self.nms_top_k,
+ keep_top_k=self.keep_top_k,
+ use_gaussian=self.use_gaussian,
+ gaussian_sigma=self.gaussian_sigma,
+ background_label=self.background_label,
+ normalized=self.normalized)
+
+
+@register
+@serializable
+class YOLOBox(object):
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ num_classes=80,
+ conf_thresh=0.005,
+ downsample_ratio=32,
+ clip_bbox=True,
+ scale_x_y=1.):
+ self.num_classes = num_classes
+ self.conf_thresh = conf_thresh
+ self.downsample_ratio = downsample_ratio
+ self.clip_bbox = clip_bbox
+ self.scale_x_y = scale_x_y
+
+ def __call__(self,
+ yolo_head_out,
+ anchors,
+ im_shape,
+ scale_factor,
+ var_weight=None):
+ boxes_list = []
+ scores_list = []
+ origin_shape = im_shape / scale_factor
+ origin_shape = paddle.cast(origin_shape, 'int32')
+ for i, head_out in enumerate(yolo_head_out):
+ boxes, scores = ops.yolo_box(head_out, origin_shape, anchors[i],
+ self.num_classes, self.conf_thresh,
+ self.downsample_ratio // 2**i,
+ self.clip_bbox, self.scale_x_y)
+ boxes_list.append(boxes)
+ scores_list.append(paddle.transpose(scores, perm=[0, 2, 1]))
+ yolo_boxes = paddle.concat(boxes_list, axis=1)
+ yolo_scores = paddle.concat(scores_list, axis=2)
+ return yolo_boxes, yolo_scores
+
+
+@register
+@serializable
+class SSDBox(object):
+ def __init__(self, is_normalized=True):
+ self.is_normalized = is_normalized
+ self.norm_delta = float(not self.is_normalized)
+
+ def __call__(self,
+ preds,
+ prior_boxes,
+ im_shape,
+ scale_factor,
+ var_weight=None):
+ boxes, scores = preds
+ outputs = []
+ for box, score, prior_box in zip(boxes, scores, prior_boxes):
+ pb_w = prior_box[:, 2] - prior_box[:, 0] + self.norm_delta
+ pb_h = prior_box[:, 3] - prior_box[:, 1] + self.norm_delta
+ pb_x = prior_box[:, 0] + pb_w * 0.5
+ pb_y = prior_box[:, 1] + pb_h * 0.5
+ out_x = pb_x + box[:, :, 0] * pb_w * 0.1
+ out_y = pb_y + box[:, :, 1] * pb_h * 0.1
+ out_w = paddle.exp(box[:, :, 2] * 0.2) * pb_w
+ out_h = paddle.exp(box[:, :, 3] * 0.2) * pb_h
+
+ if self.is_normalized:
+ h = paddle.unsqueeze(
+ im_shape[:, 0] / scale_factor[:, 0], axis=-1)
+ w = paddle.unsqueeze(
+ im_shape[:, 1] / scale_factor[:, 1], axis=-1)
+ output = paddle.stack(
+ [(out_x - out_w / 2.) * w, (out_y - out_h / 2.) * h,
+ (out_x + out_w / 2.) * w, (out_y + out_h / 2.) * h],
+ axis=-1)
+ else:
+ output = paddle.stack(
+ [
+ out_x - out_w / 2., out_y - out_h / 2.,
+ out_x + out_w / 2. - 1., out_y + out_h / 2. - 1.
+ ],
+ axis=-1)
+ outputs.append(output)
+ boxes = paddle.concat(outputs, axis=1)
+
+ scores = F.softmax(paddle.concat(scores, axis=1))
+ scores = paddle.transpose(scores, [0, 2, 1])
+
+ return boxes, scores
+
+
+@register
+@serializable
+class AnchorGrid(object):
+ """Generate anchor grid
+
+ Args:
+ image_size (int or list): input image size, may be a single integer or
+ list of [h, w]. Default: 512
+ min_level (int): min level of the feature pyramid. Default: 3
+ max_level (int): max level of the feature pyramid. Default: 7
+ anchor_base_scale (int): base anchor scale. Default: 4
+ num_scales (int): number of anchor scales. Default: 3
+ aspect_ratios (list): aspect ratios. Default: [[1, 1], [1.4, 0.7], [0.7, 1.4]]
+ """
+
+ def __init__(self,
+ image_size=512,
+ min_level=3,
+ max_level=7,
+ anchor_base_scale=4,
+ num_scales=3,
+ aspect_ratios=[[1, 1], [1.4, 0.7], [0.7, 1.4]]):
+ super(AnchorGrid, self).__init__()
+ if isinstance(image_size, Integral):
+ self.image_size = [image_size, image_size]
+ else:
+ self.image_size = image_size
+ for dim in self.image_size:
+ assert dim % 2 ** max_level == 0, \
+ "image size should be multiple of the max level stride"
+ self.min_level = min_level
+ self.max_level = max_level
+ self.anchor_base_scale = anchor_base_scale
+ self.num_scales = num_scales
+ self.aspect_ratios = aspect_ratios
+
+ @property
+ def base_cell(self):
+ if not hasattr(self, '_base_cell'):
+ self._base_cell = self.make_cell()
+ return self._base_cell
+
+ def make_cell(self):
+ scales = [2**(i / self.num_scales) for i in range(self.num_scales)]
+ scales = np.array(scales)
+ ratios = np.array(self.aspect_ratios)
+ ws = np.outer(scales, ratios[:, 0]).reshape(-1, 1)
+ hs = np.outer(scales, ratios[:, 1]).reshape(-1, 1)
+ anchors = np.hstack((-0.5 * ws, -0.5 * hs, 0.5 * ws, 0.5 * hs))
+ return anchors
+
+ def make_grid(self, stride):
+ cell = self.base_cell * stride * self.anchor_base_scale
+ x_steps = np.arange(stride // 2, self.image_size[1], stride)
+ y_steps = np.arange(stride // 2, self.image_size[0], stride)
+ offset_x, offset_y = np.meshgrid(x_steps, y_steps)
+ offset_x = offset_x.flatten()
+ offset_y = offset_y.flatten()
+ offsets = np.stack((offset_x, offset_y, offset_x, offset_y), axis=-1)
+ offsets = offsets[:, np.newaxis, :]
+ return (cell + offsets).reshape(-1, 4)
+
+ def generate(self):
+ return [
+ self.make_grid(2**l)
+ for l in range(self.min_level, self.max_level + 1)
+ ]
+
+ def __call__(self):
+ if not hasattr(self, '_anchor_vars'):
+ anchor_vars = []
+ helper = LayerHelper('anchor_grid')
+ for idx, l in enumerate(range(self.min_level, self.max_level + 1)):
+ stride = 2**l
+ anchors = self.make_grid(stride)
+ var = helper.create_parameter(
+ attr=ParamAttr(name='anchors_{}'.format(idx)),
+ shape=anchors.shape,
+ dtype='float32',
+ stop_gradient=True,
+ default_initializer=NumpyArrayInitializer(anchors))
+ anchor_vars.append(var)
+ var.persistable = True
+ self._anchor_vars = anchor_vars
+
+ return self._anchor_vars
+
+
+@register
+@serializable
+class FCOSBox(object):
+ __shared__ = ['num_classes']
+
+ def __init__(self, num_classes=80):
+ super(FCOSBox, self).__init__()
+ self.num_classes = num_classes
+
+ def _merge_hw(self, inputs, ch_type="channel_first"):
+ """
+ Merge h and w of the feature map into one dimension.
+ Args:
+ inputs (Tensor): Tensor of the input feature map
+ ch_type (str): "channel_first" or "channel_last" style
+ Return:
+ new_shape (Tensor): The new shape after h and w merged
+ """
+ shape_ = paddle.shape(inputs)
+ bs, ch, hi, wi = shape_[0], shape_[1], shape_[2], shape_[3]
+ img_size = hi * wi
+ img_size.stop_gradient = True
+ if ch_type == "channel_first":
+ new_shape = paddle.concat([bs, ch, img_size])
+ elif ch_type == "channel_last":
+ new_shape = paddle.concat([bs, img_size, ch])
+ else:
+ raise KeyError("Wrong ch_type %s" % ch_type)
+ new_shape.stop_gradient = True
+ return new_shape
+
+ def _postprocessing_by_level(self, locations, box_cls, box_reg, box_ctn,
+ scale_factor):
+ """
+ Postprocess each layer of the output with corresponding locations.
+ Args:
+ locations (Tensor): anchor points for current layer, [H*W, 2]
+ box_cls (Tensor): categories prediction, [N, C, H, W],
+ C is the number of classes
+ box_reg (Tensor): bounding box prediction, [N, 4, H, W]
+ box_ctn (Tensor): centerness prediction, [N, 1, H, W]
+ scale_factor (Tensor): [h_scale, w_scale] for input images
+ Return:
+ box_cls_ch_last (Tensor): score for each category, in [N, C, M]
+ C is the number of classes and M is the number of anchor points
+ box_reg_decoding (Tensor): decoded bounding box, in [N, M, 4]
+ last dimension is [x1, y1, x2, y2]
+ """
+ act_shape_cls = self._merge_hw(box_cls)
+ box_cls_ch_last = paddle.reshape(x=box_cls, shape=act_shape_cls)
+ box_cls_ch_last = F.sigmoid(box_cls_ch_last)
+
+ act_shape_reg = self._merge_hw(box_reg)
+ box_reg_ch_last = paddle.reshape(x=box_reg, shape=act_shape_reg)
+ box_reg_ch_last = paddle.transpose(box_reg_ch_last, perm=[0, 2, 1])
+ box_reg_decoding = paddle.stack(
+ [
+ locations[:, 0] - box_reg_ch_last[:, :, 0],
+ locations[:, 1] - box_reg_ch_last[:, :, 1],
+ locations[:, 0] + box_reg_ch_last[:, :, 2],
+ locations[:, 1] + box_reg_ch_last[:, :, 3]
+ ],
+ axis=1)
+ box_reg_decoding = paddle.transpose(box_reg_decoding, perm=[0, 2, 1])
+
+ act_shape_ctn = self._merge_hw(box_ctn)
+ box_ctn_ch_last = paddle.reshape(x=box_ctn, shape=act_shape_ctn)
+ box_ctn_ch_last = F.sigmoid(box_ctn_ch_last)
+
+ # recover the location to original image
+ im_scale = paddle.concat([scale_factor, scale_factor], axis=1)
+ im_scale = paddle.expand(im_scale, [box_reg_decoding.shape[0], 4])
+ im_scale = paddle.reshape(im_scale, [box_reg_decoding.shape[0], -1, 4])
+ box_reg_decoding = box_reg_decoding / im_scale
+ box_cls_ch_last = box_cls_ch_last * box_ctn_ch_last
+ return box_cls_ch_last, box_reg_decoding
+
+ def __call__(self, locations, cls_logits, bboxes_reg, centerness,
+ scale_factor):
+ pred_boxes_ = []
+ pred_scores_ = []
+ for pts, cls, box, ctn in zip(locations, cls_logits, bboxes_reg,
+ centerness):
+ pred_scores_lvl, pred_boxes_lvl = self._postprocessing_by_level(
+ pts, cls, box, ctn, scale_factor)
+ pred_boxes_.append(pred_boxes_lvl)
+ pred_scores_.append(pred_scores_lvl)
+ pred_boxes = paddle.concat(pred_boxes_, axis=1)
+ pred_scores = paddle.concat(pred_scores_, axis=2)
+ return pred_boxes, pred_scores
+
+
+@register
+class TTFBox(object):
+ __shared__ = ['down_ratio']
+
+ def __init__(self, max_per_img=100, score_thresh=0.01, down_ratio=4):
+ super(TTFBox, self).__init__()
+ self.max_per_img = max_per_img
+ self.score_thresh = score_thresh
+ self.down_ratio = down_ratio
+
+ def _simple_nms(self, heat, kernel=3):
+ """
+ Use maxpool to filter the max score, get local peaks.
+ """
+ pad = (kernel - 1) // 2
+ hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
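+ # hmax equals heat exactly at the local maxima of each kernel x kernel
+ # window, so the mask below keeps peaks and zeroes everything else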
+ keep = paddle.cast(hmax == heat, 'float32')
+ return heat * keep
+
+ def _topk(self, scores):
+ """
+ Select top k scores and decode to get xy coordinates.
+ """
+ k = self.max_per_img
+ shape_fm = paddle.shape(scores)
+ shape_fm.stop_gradient = True
+ cat, height, width = shape_fm[1], shape_fm[2], shape_fm[3]
+ # batch size is 1
+ scores_r = paddle.reshape(scores, [cat, -1])
+ topk_scores, topk_inds = paddle.topk(scores_r, k)
+ topk_ys = topk_inds // width
+ topk_xs = topk_inds % width
+
+ topk_score_r = paddle.reshape(topk_scores, [-1])
+ topk_score, topk_ind = paddle.topk(topk_score_r, k)
+ k_t = paddle.full(paddle.shape(topk_ind), k, dtype='int64')
+ topk_clses = paddle.cast(paddle.floor_divide(topk_ind, k_t), 'float32')
+
+ topk_inds = paddle.reshape(topk_inds, [-1])
+ topk_ys = paddle.reshape(topk_ys, [-1, 1])
+ topk_xs = paddle.reshape(topk_xs, [-1, 1])
+ topk_inds = paddle.gather(topk_inds, topk_ind)
+ topk_ys = paddle.gather(topk_ys, topk_ind)
+ topk_xs = paddle.gather(topk_xs, topk_ind)
+
+ return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
+
+ def _decode(self, hm, wh, im_shape, scale_factor):
+ heatmap = F.sigmoid(hm)
+ heat = self._simple_nms(heatmap)
+ scores, inds, clses, ys, xs = self._topk(heat)
+ ys = paddle.cast(ys, 'float32') * self.down_ratio
+ xs = paddle.cast(xs, 'float32') * self.down_ratio
+ scores = paddle.tensor.unsqueeze(scores, [1])
+ clses = paddle.tensor.unsqueeze(clses, [1])
+
+ wh_t = paddle.transpose(wh, [0, 2, 3, 1])
+ wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
+ wh = paddle.gather(wh, inds)
+
+ x1 = xs - wh[:, 0:1]
+ y1 = ys - wh[:, 1:2]
+ x2 = xs + wh[:, 2:3]
+ y2 = ys + wh[:, 3:4]
+
+ bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
+
+ scale_y = scale_factor[:, 0:1]
+ scale_x = scale_factor[:, 1:2]
+ scale_expand = paddle.concat(
+ [scale_x, scale_y, scale_x, scale_y], axis=1)
+ boxes_shape = paddle.shape(bboxes)
+ boxes_shape.stop_gradient = True
+ scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
+ bboxes = paddle.divide(bboxes, scale_expand)
+ results = paddle.concat([clses, scores, bboxes], axis=1)
+        # hack: append a result with cls=-1 and score=1. to avoid the case
+        # where all scores fall below score_thresh, which would break gather.
+ fill_r = paddle.to_tensor(np.array([[-1, 1, 0, 0, 0, 0]]))
+ fill_r = paddle.cast(fill_r, results.dtype)
+ results = paddle.concat([results, fill_r])
+ scores = results[:, 1]
+ valid_ind = paddle.nonzero(scores > self.score_thresh)
+ results = paddle.gather(results, valid_ind)
+ return results, paddle.shape(results)[0:1]
+
+ def __call__(self, hm, wh, im_shape, scale_factor):
+ results = []
+ results_num = []
+ for i in range(scale_factor.shape[0]):
+ result, num = self._decode(hm[i:i + 1, ], wh[i:i + 1, ],
+ im_shape[i:i + 1, ],
+ scale_factor[i:i + 1, ])
+ results.append(result)
+ results_num.append(num)
+ results = paddle.concat(results, axis=0)
+ results_num = paddle.concat(results_num, axis=0)
+ return results, results_num
+
+
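+# Editor's illustrative sketch (not part of the original module): TTFBox's
+# _simple_nms keeps only local maxima of the heatmap, because a 3x3 max-pool
+# leaves a value unchanged exactly where it is a peak.
+def _example_simple_nms():
+    heat = paddle.to_tensor([[[[0.1, 0.9, 0.1],
+                               [0.2, 0.3, 0.2],
+                               [0.8, 0.1, 0.1]]]])  # [N=1, C=1, H=3, W=3]
+    hmax = F.max_pool2d(heat, 3, stride=1, padding=1)
+    keep = paddle.cast(hmax == heat, 'float32')
+    return heat * keep  # only the peaks 0.9 and 0.8 survive; the rest is zeroed
+
+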
+@register
+@serializable
+class JDEBox(object):
+ __shared__ = ['num_classes']
+
+ def __init__(self, num_classes=1, conf_thresh=0.3, downsample_ratio=32):
+ self.num_classes = num_classes
+ self.conf_thresh = conf_thresh
+ self.downsample_ratio = downsample_ratio
+
+ def generate_anchor(self, nGh, nGw, anchor_wh):
+ nA = len(anchor_wh)
+ yv, xv = paddle.meshgrid([paddle.arange(nGh), paddle.arange(nGw)])
+ mesh = paddle.stack(
+ (xv, yv), axis=0).cast(dtype='float32') # 2 x nGh x nGw
+ meshs = paddle.tile(mesh, [nA, 1, 1, 1])
+
+ anchor_offset_mesh = anchor_wh[:, :, None][:, :, :, None].repeat(
+ int(nGh), axis=-2).repeat(
+ int(nGw), axis=-1)
+ anchor_offset_mesh = paddle.to_tensor(
+ anchor_offset_mesh.astype(np.float32))
+ # nA x 2 x nGh x nGw
+
+ anchor_mesh = paddle.concat([meshs, anchor_offset_mesh], axis=1)
+ anchor_mesh = paddle.transpose(anchor_mesh,
+                                       [0, 2, 3, 1])  # nA x nGh x nGw x 4
+ return anchor_mesh
+
+ def decode_delta(self, delta, fg_anchor_list):
+        px, py, pw, ph = fg_anchor_list[:, 0], fg_anchor_list[:, 1], \
+            fg_anchor_list[:, 2], fg_anchor_list[:, 3]
+ dx, dy, dw, dh = delta[:, 0], delta[:, 1], delta[:, 2], delta[:, 3]
+ gx = pw * dx + px
+ gy = ph * dy + py
+ gw = pw * paddle.exp(dw)
+ gh = ph * paddle.exp(dh)
+ gx1 = gx - gw * 0.5
+ gy1 = gy - gh * 0.5
+ gx2 = gx + gw * 0.5
+ gy2 = gy + gh * 0.5
+ return paddle.stack([gx1, gy1, gx2, gy2], axis=1)
+
+ def decode_delta_map(self, nA, nGh, nGw, delta_map, anchor_vec):
+ anchor_mesh = self.generate_anchor(nGh, nGw, anchor_vec)
+ anchor_mesh = paddle.unsqueeze(anchor_mesh, 0)
+ pred_list = self.decode_delta(
+ paddle.reshape(
+ delta_map, shape=[-1, 4]),
+ paddle.reshape(
+ anchor_mesh, shape=[-1, 4]))
+ pred_map = paddle.reshape(pred_list, shape=[nA * nGh * nGw, 4])
+ return pred_map
+
+ def _postprocessing_by_level(self, nA, stride, head_out, anchor_vec):
+ boxes_shape = head_out.shape # [nB, nA*6, nGh, nGw]
+ nGh, nGw = boxes_shape[-2], boxes_shape[-1]
+ nB = 1 # TODO: only support bs=1 now
+ boxes_list, scores_list = [], []
+ for idx in range(nB):
+ p = paddle.reshape(
+ head_out[idx], shape=[nA, self.num_classes + 5, nGh, nGw])
+ p = paddle.transpose(p, perm=[0, 2, 3, 1]) # [nA, nGh, nGw, 6]
+ delta_map = p[:, :, :, :4]
+ boxes = self.decode_delta_map(nA, nGh, nGw, delta_map, anchor_vec)
+ # [nA * nGh * nGw, 4]
+ boxes_list.append(boxes * stride)
+
+ p_conf = paddle.transpose(
+ p[:, :, :, 4:6], perm=[3, 0, 1, 2]) # [2, nA, nGh, nGw]
+ p_conf = F.softmax(
+ p_conf, axis=0)[1, :, :, :].unsqueeze(-1) # [nA, nGh, nGw, 1]
+ scores = paddle.reshape(p_conf, shape=[nA * nGh * nGw, 1])
+ scores_list.append(scores)
+
+ boxes_results = paddle.stack(boxes_list)
+ scores_results = paddle.stack(scores_list)
+ return boxes_results, scores_results
+
+ def __call__(self, yolo_head_out, anchors):
+ bbox_pred_list = []
+ for i, head_out in enumerate(yolo_head_out):
+ stride = self.downsample_ratio // 2**i
+ anc_w, anc_h = anchors[i][0::2], anchors[i][1::2]
+ anchor_vec = np.stack((anc_w, anc_h), axis=1) / stride
+ nA = len(anc_w)
+ boxes, scores = self._postprocessing_by_level(nA, stride, head_out,
+ anchor_vec)
+ bbox_pred_list.append(paddle.concat([boxes, scores], axis=-1))
+
+ yolo_boxes_scores = paddle.concat(bbox_pred_list, axis=1)
+ boxes_idx_over_conf_thr = paddle.nonzero(
+ yolo_boxes_scores[:, :, -1] > self.conf_thresh)
+ boxes_idx_over_conf_thr.stop_gradient = True
+
+ return boxes_idx_over_conf_thr, yolo_boxes_scores
+
+
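+# Editor's illustrative sketch (not part of the original module): JDEBox's
+# decode_delta applies the classic anchor transform, shifting the center by
+# (dx * pw, dy * ph) and scaling the size by exp(dw), exp(dh).
+def _example_decode_delta():
+    anchor = paddle.to_tensor([[100., 100., 20., 40.]])  # (px, py, pw, ph)
+    delta = paddle.to_tensor([[0.5, -0.25, 0., 0.]])     # (dx, dy, dw, dh)
+    # center moves to (110, 90); the size stays (20, 40) since exp(0) == 1,
+    # so the decoded corner-format box is [100., 70., 120., 110.]
+    return JDEBox().decode_delta(delta, anchor)
+
+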
+@register
+@serializable
+class MaskMatrixNMS(object):
+ """
+ Matrix NMS for multi-class masks.
+ Args:
+        update_threshold (float): Threshold used to filter category scores after they are updated by Matrix NMS.
+        pre_nms_top_n (int): Number of total instances to be kept per image before NMS.
+        post_nms_top_n (int): Number of total instances to be kept per image after NMS.
+ kernel (str): 'linear' or 'gaussian'.
+ sigma (float): std in gaussian method.
+ Input:
+ seg_preds (Variable): shape (n, h, w), segmentation feature maps
+        seg_masks (Variable): shape (n, h, w), binarized segmentation masks
+ cate_labels (Variable): shape (n), mask labels in descending order
+ cate_scores (Variable): shape (n), mask scores in descending order
+ sum_masks (Variable): a float tensor of the sum of seg_masks
+ Returns:
+        Variable: seg_preds, cate_scores and cate_labels kept after Matrix NMS
+ """
+
+ def __init__(self,
+ update_threshold=0.05,
+ pre_nms_top_n=500,
+ post_nms_top_n=100,
+ kernel='gaussian',
+ sigma=2.0):
+ super(MaskMatrixNMS, self).__init__()
+ self.update_threshold = update_threshold
+ self.pre_nms_top_n = pre_nms_top_n
+ self.post_nms_top_n = post_nms_top_n
+ self.kernel = kernel
+ self.sigma = sigma
+
+ def _sort_score(self, scores, top_num):
+ if paddle.shape(scores)[0] > top_num:
+ return paddle.topk(scores, top_num)[1]
+ else:
+ return paddle.argsort(scores, descending=True)
+
+ def __call__(self,
+ seg_preds,
+ seg_masks,
+ cate_labels,
+ cate_scores,
+ sum_masks=None):
+ # sort and keep top nms_pre
+ sort_inds = self._sort_score(cate_scores, self.pre_nms_top_n)
+ seg_masks = paddle.gather(seg_masks, index=sort_inds)
+ seg_preds = paddle.gather(seg_preds, index=sort_inds)
+ sum_masks = paddle.gather(sum_masks, index=sort_inds)
+ cate_scores = paddle.gather(cate_scores, index=sort_inds)
+ cate_labels = paddle.gather(cate_labels, index=sort_inds)
+
+ seg_masks = paddle.flatten(seg_masks, start_axis=1, stop_axis=-1)
+ # inter.
+ inter_matrix = paddle.mm(seg_masks, paddle.transpose(seg_masks, [1, 0]))
+ n_samples = paddle.shape(cate_labels)
+ # union.
+ sum_masks_x = paddle.expand(sum_masks, shape=[n_samples, n_samples])
+ # iou.
+ iou_matrix = (inter_matrix / (
+ sum_masks_x + paddle.transpose(sum_masks_x, [1, 0]) - inter_matrix))
+ iou_matrix = paddle.triu(iou_matrix, diagonal=1)
+ # label_specific matrix.
+ cate_labels_x = paddle.expand(cate_labels, shape=[n_samples, n_samples])
+ label_matrix = paddle.cast(
+ (cate_labels_x == paddle.transpose(cate_labels_x, [1, 0])),
+ 'float32')
+ label_matrix = paddle.triu(label_matrix, diagonal=1)
+
+ # IoU compensation
+ compensate_iou = paddle.max((iou_matrix * label_matrix), axis=0)
+ compensate_iou = paddle.expand(
+ compensate_iou, shape=[n_samples, n_samples])
+ compensate_iou = paddle.transpose(compensate_iou, [1, 0])
+
+ # IoU decay
+ decay_iou = iou_matrix * label_matrix
+
+ # matrix nms
+ if self.kernel == 'gaussian':
+ decay_matrix = paddle.exp(-1 * self.sigma * (decay_iou**2))
+ compensate_matrix = paddle.exp(-1 * self.sigma *
+ (compensate_iou**2))
+ decay_coefficient = paddle.min(decay_matrix / compensate_matrix,
+ axis=0)
+ elif self.kernel == 'linear':
+ decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
+ decay_coefficient = paddle.min(decay_matrix, axis=0)
+ else:
+ raise NotImplementedError
+
+ # update the score.
+ cate_scores = cate_scores * decay_coefficient
+ y = paddle.zeros(shape=paddle.shape(cate_scores), dtype='float32')
+ keep = paddle.where(cate_scores >= self.update_threshold, cate_scores,
+ y)
+ keep = paddle.nonzero(keep)
+ keep = paddle.squeeze(keep, axis=[1])
+ # Prevent empty and increase fake data
+ keep = paddle.concat(
+ [keep, paddle.cast(paddle.shape(cate_scores)[0] - 1, 'int64')])
+
+ seg_preds = paddle.gather(seg_preds, index=keep)
+ cate_scores = paddle.gather(cate_scores, index=keep)
+ cate_labels = paddle.gather(cate_labels, index=keep)
+
+ # sort and keep top_k
+ sort_inds = self._sort_score(cate_scores, self.post_nms_top_n)
+ seg_preds = paddle.gather(seg_preds, index=sort_inds)
+ cate_scores = paddle.gather(cate_scores, index=sort_inds)
+ cate_labels = paddle.gather(cate_labels, index=sort_inds)
+ return seg_preds, cate_scores, cate_labels
+
+
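+# Editor's illustrative sketch (not part of the original module): unlike hard
+# NMS, the Matrix NMS decay above down-weights a mask's score in proportion to
+# its IoU with every higher-scoring mask of the same class.
+def _example_matrix_nms_linear_decay():
+    # upper-triangular IoU matrix of three score-sorted masks of one class
+    iou = paddle.to_tensor([[0.0, 0.8, 0.1],
+                            [0.0, 0.0, 0.6],
+                            [0.0, 0.0, 0.0]])
+    # compensation: the highest IoU each suppressor itself suffered
+    compensate = paddle.max(iou, axis=0)  # [0., 0.8, 0.6]
+    compensate = paddle.transpose(
+        paddle.expand(paddle.unsqueeze(compensate, 0), [3, 3]), [1, 0])
+    # linear kernel: each column keeps the strongest decay over its suppressors
+    decay = paddle.min((1 - iou) / (1 - compensate), axis=0)
+    return decay  # ~[1.0, 0.2, 0.9]: mask 1 decays hard, mask 2 only mildly
+
+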
+def Conv2d(in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ weight_init=Normal(std=0.001),
+ bias_init=Constant(0.)):
+ weight_attr = paddle.framework.ParamAttr(initializer=weight_init)
+ if bias:
+ bias_attr = paddle.framework.ParamAttr(initializer=bias_init)
+ else:
+ bias_attr = False
+ conv = nn.Conv2D(
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ padding,
+ dilation,
+ groups,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr)
+ return conv
+
+
+def ConvTranspose2d(in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ output_padding=0,
+ groups=1,
+ bias=True,
+ dilation=1,
+ weight_init=Normal(std=0.001),
+ bias_init=Constant(0.)):
+ weight_attr = paddle.framework.ParamAttr(initializer=weight_init)
+ if bias:
+ bias_attr = paddle.framework.ParamAttr(initializer=bias_init)
+ else:
+ bias_attr = False
+ conv = nn.Conv2DTranspose(
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ padding,
+ output_padding,
+ dilation,
+ groups,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr)
+ return conv
+
+
+def BatchNorm2d(num_features, eps=1e-05, momentum=0.9, affine=True):
+ if not affine:
+ weight_attr = False
+ bias_attr = False
+ else:
+ weight_attr = None
+ bias_attr = None
+ batchnorm = nn.BatchNorm2D(
+ num_features,
+ momentum,
+ eps,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr)
+ return batchnorm
+
+
+def ReLU():
+ return nn.ReLU()
+
+
+def Upsample(scale_factor=None, mode='nearest', align_corners=False):
+ return nn.Upsample(None, scale_factor, mode, align_corners)
+
+
+def MaxPool(kernel_size, stride, padding, ceil_mode=False):
+ return nn.MaxPool2D(kernel_size, stride, padding, ceil_mode=ceil_mode)
+
+
+class Concat(nn.Layer):
+ def __init__(self, dim=0):
+ super(Concat, self).__init__()
+ self.dim = dim
+
+ def forward(self, inputs):
+ return paddle.concat(inputs, axis=self.dim)
+
+ def extra_repr(self):
+ return 'dim={}'.format(self.dim)
+
+
+def _convert_attention_mask(attn_mask, dtype):
+ """
+ Convert the attention mask to the target dtype we expect.
+ Parameters:
+        attn_mask (Tensor, optional): A tensor used in multi-head attention
+            to prevent attention to some unwanted positions, usually the
+            paddings or the subsequent positions. It is a tensor with shape
+            broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
+            When the data type is bool, the unwanted positions have `False`
+            values and the others have `True` values. When the data type is
+            int, the unwanted positions have 0 values and the others have 1
+            values. When the data type is float, the unwanted positions have
+            `-INF` values and the others have 0 values. It can be None when
+            nothing needs to be masked out. Default None.
+ dtype (VarType): The target type of `attn_mask` we expect.
+ Returns:
+ Tensor: A Tensor with shape same as input `attn_mask`, with data type `dtype`.
+ """
+ return nn.layer.transformer._convert_attention_mask(attn_mask, dtype)
+
+
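+# Editor's illustrative sketch (not part of the original module): a bool mask
+# becomes an additive float mask whose disallowed (False) positions hold a very
+# large negative value (-1e9 in current Paddle), so softmax drives them to ~0.
+def _example_convert_attention_mask():
+    bool_mask = paddle.to_tensor([[True, False]])
+    return _convert_attention_mask(bool_mask, paddle.float32)  # ~[[0., -1e9]]
+
+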
+class MultiHeadAttention(nn.Layer):
+ """
+    Attention maps queries and a set of key-value pairs to outputs, and
+    Multi-Head Attention performs multiple attention operations in parallel
+    to jointly attend to information from different representation subspaces.
+
+    Please refer to `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
+    for more details.
+
+ Parameters:
+ embed_dim (int): The expected feature size in the input and output.
+ num_heads (int): The number of heads in multi-head attention.
+ dropout (float, optional): The dropout probability used on attention
+ weights to drop some attention targets. 0 for no dropout. Default 0
+ kdim (int, optional): The feature size in key. If None, assumed equal to
+ `embed_dim`. Default None.
+ vdim (int, optional): The feature size in value. If None, assumed equal to
+ `embed_dim`. Default None.
+ need_weights (bool, optional): Indicate whether to return the attention
+ weights. Default False.
+
+ Examples:
+
+ .. code-block:: python
+
+ import paddle
+
+ # encoder input: [batch_size, sequence_length, d_model]
+ query = paddle.rand((2, 4, 128))
+ # self attention mask: [batch_size, num_heads, query_len, query_len]
+ attn_mask = paddle.rand((2, 2, 4, 4))
+ multi_head_attn = paddle.nn.MultiHeadAttention(128, 2)
+ output = multi_head_attn(query, None, None, attn_mask=attn_mask) # [2, 4, 128]
+ """
+
+ def __init__(self,
+ embed_dim,
+ num_heads,
+ dropout=0.,
+ kdim=None,
+ vdim=None,
+ need_weights=False):
+ super(MultiHeadAttention, self).__init__()
+ self.embed_dim = embed_dim
+ self.kdim = kdim if kdim is not None else embed_dim
+ self.vdim = vdim if vdim is not None else embed_dim
+ self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
+
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.need_weights = need_weights
+
+ self.head_dim = embed_dim // num_heads
+ assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
+
+ if self._qkv_same_embed_dim:
+ self.in_proj_weight = self.create_parameter(
+ shape=[embed_dim, 3 * embed_dim],
+ attr=None,
+ dtype=self._dtype,
+ is_bias=False)
+ self.in_proj_bias = self.create_parameter(
+ shape=[3 * embed_dim],
+ attr=None,
+ dtype=self._dtype,
+ is_bias=True)
+ else:
+ self.q_proj = nn.Linear(embed_dim, embed_dim)
+ self.k_proj = nn.Linear(self.kdim, embed_dim)
+ self.v_proj = nn.Linear(self.vdim, embed_dim)
+
+ self.out_proj = nn.Linear(embed_dim, embed_dim)
+ self._type_list = ('q_proj', 'k_proj', 'v_proj')
+
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ for p in self.parameters():
+ if p.dim() > 1:
+ xavier_uniform_(p)
+ else:
+ constant_(p)
+
+ def compute_qkv(self, tensor, index):
+ if self._qkv_same_embed_dim:
+ tensor = F.linear(
+ x=tensor,
+ weight=self.in_proj_weight[:, index * self.embed_dim:(index + 1)
+ * self.embed_dim],
+ bias=self.in_proj_bias[index * self.embed_dim:(index + 1) *
+ self.embed_dim]
+ if self.in_proj_bias is not None else None)
+ else:
+ tensor = getattr(self, self._type_list[index])(tensor)
+ tensor = tensor.reshape(
+ [0, 0, self.num_heads, self.head_dim]).transpose([0, 2, 1, 3])
+ return tensor
+
+ def forward(self, query, key=None, value=None, attn_mask=None):
+ r"""
+ Applies multi-head attention to map queries and a set of key-value pairs
+ to outputs.
+
+ Parameters:
+ query (Tensor): The queries for multi-head attention. It is a
+ tensor with shape `[batch_size, query_length, embed_dim]`. The
+ data type should be float32 or float64.
+ key (Tensor, optional): The keys for multi-head attention. It is
+ a tensor with shape `[batch_size, key_length, kdim]`. The
+ data type should be float32 or float64. If None, use `query` as
+ `key`. Default None.
+ value (Tensor, optional): The values for multi-head attention. It
+ is a tensor with shape `[batch_size, value_length, vdim]`.
+ The data type should be float32 or float64. If None, use `query` as
+ `value`. Default None.
+            attn_mask (Tensor, optional): A tensor used in multi-head attention
+                to prevent attention to some unwanted positions, usually the
+                paddings or the subsequent positions. It is a tensor with shape
+                broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
+                When the data type is bool, the unwanted positions have `False`
+                values and the others have `True` values. When the data type is
+                int, the unwanted positions have 0 values and the others have 1
+                values. When the data type is float, the unwanted positions have
+                `-INF` values and the others have 0 values. It can be None when
+                nothing needs to be masked out. Default None.
+
+ Returns:
+ Tensor|tuple: It is a tensor that has the same shape and data type \
+ as `query`, representing attention output. Or a tuple if \
+ `need_weights` is True or `cache` is not None. If `need_weights` \
+ is True, except for attention output, the tuple also includes \
+ the attention weights tensor shaped `[batch_size, num_heads, query_length, key_length]`. \
+ If `cache` is not None, the tuple then includes the new cache \
+ having the same type as `cache`, and if it is `StaticCache`, it \
+ is same as the input `cache`, if it is `Cache`, the new cache \
+            reserves tensors concatenating raw tensors with intermediate \
+ results of current query.
+ """
+ key = query if key is None else key
+ value = query if value is None else value
+        # compute q, k, v
+ q, k, v = (self.compute_qkv(t, i)
+ for i, t in enumerate([query, key, value]))
+
+ # scale dot product attention
+ product = paddle.matmul(x=q, y=k, transpose_y=True)
+ scaling = float(self.head_dim)**-0.5
+ product = product * scaling
+
+ if attn_mask is not None:
+ # Support bool or int mask
+ attn_mask = _convert_attention_mask(attn_mask, product.dtype)
+ product = product + attn_mask
+ weights = F.softmax(product)
+ if self.dropout:
+ weights = F.dropout(
+ weights,
+ self.dropout,
+ training=self.training,
+ mode="upscale_in_train")
+
+ out = paddle.matmul(weights, v)
+
+ # combine heads
+ out = paddle.transpose(out, perm=[0, 2, 1, 3])
+ out = paddle.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]])
+
+ # project to output
+ out = self.out_proj(out)
+
+ outs = [out]
+ if self.need_weights:
+ outs.append(weights)
+ return out if len(outs) == 1 else tuple(outs)
+
+
+@register
+class ConvMixer(nn.Layer):
+ def __init__(
+ self,
+ dim,
+ depth,
+ kernel_size=3, ):
+ super().__init__()
+ self.dim = dim
+ self.depth = depth
+ self.kernel_size = kernel_size
+
+ self.mixer = self.conv_mixer(dim, depth, kernel_size)
+
+ def forward(self, x):
+ return self.mixer(x)
+
+ @staticmethod
+ def conv_mixer(
+ dim,
+ depth,
+ kernel_size, ):
+ Seq, ActBn = nn.Sequential, lambda x: Seq(x, nn.GELU(), nn.BatchNorm2D(dim))
+ Residual = type('Residual', (Seq, ),
+ {'forward': lambda self, x: self[0](x) + x})
+ return Seq(*[
+ Seq(Residual(
+ ActBn(
+ nn.Conv2D(
+ dim, dim, kernel_size, groups=dim, padding="same"))),
+ ActBn(nn.Conv2D(dim, dim, 1))) for i in range(depth)
+ ])
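+
+
+# Editor's illustrative sketch (not part of the original module): a quick shape
+# check of ConvMixer; depthwise "same" convolutions and 1x1 convolutions both
+# preserve the [N, dim, H, W] layout.
+def _example_conv_mixer():
+    mixer = ConvMixer(dim=8, depth=2, kernel_size=3)
+    x = paddle.rand([1, 8, 16, 16])
+    return mixer(x).shape  # [1, 8, 16, 16]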
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__init__.py
new file mode 100644
index 000000000..83389c08e
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import yolo_loss
+from . import iou_aware_loss
+from . import iou_loss
+from . import ssd_loss
+from . import fcos_loss
+from . import solov2_loss
+from . import ctfocal_loss
+from . import keypoint_loss
+from . import jde_loss
+from . import fairmot_loss
+from . import gfocal_loss
+from . import detr_loss
+from . import sparsercnn_loss
+
+from .yolo_loss import *
+from .iou_aware_loss import *
+from .iou_loss import *
+from .ssd_loss import *
+from .fcos_loss import *
+from .solov2_loss import *
+from .ctfocal_loss import *
+from .keypoint_loss import *
+from .jde_loss import *
+from .fairmot_loss import *
+from .gfocal_loss import *
+from .detr_loss import *
+from .sparsercnn_loss import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..f19224ba0
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/ctfocal_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/ctfocal_loss.cpython-37.pyc
new file mode 100644
index 000000000..093a6a7a2
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/ctfocal_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/detr_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/detr_loss.cpython-37.pyc
new file mode 100644
index 000000000..998b42ff4
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/detr_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/fairmot_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/fairmot_loss.cpython-37.pyc
new file mode 100644
index 000000000..1b031f7a9
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/fairmot_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/fcos_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/fcos_loss.cpython-37.pyc
new file mode 100644
index 000000000..4aa367a0f
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/fcos_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/gfocal_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/gfocal_loss.cpython-37.pyc
new file mode 100644
index 000000000..bea48b9bc
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/gfocal_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/iou_aware_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/iou_aware_loss.cpython-37.pyc
new file mode 100644
index 000000000..9f06e288c
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/iou_aware_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/iou_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/iou_loss.cpython-37.pyc
new file mode 100644
index 000000000..524270db2
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/iou_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/jde_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/jde_loss.cpython-37.pyc
new file mode 100644
index 000000000..aae1c7aef
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/jde_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/keypoint_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/keypoint_loss.cpython-37.pyc
new file mode 100644
index 000000000..696189238
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/keypoint_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/solov2_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/solov2_loss.cpython-37.pyc
new file mode 100644
index 000000000..84bc2fcda
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/solov2_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/sparsercnn_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/sparsercnn_loss.cpython-37.pyc
new file mode 100644
index 000000000..c791e49cb
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/sparsercnn_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/ssd_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/ssd_loss.cpython-37.pyc
new file mode 100644
index 000000000..f2fd2b9c3
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/ssd_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/varifocal_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/varifocal_loss.cpython-37.pyc
new file mode 100644
index 000000000..bf1a6509e
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/varifocal_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/yolo_loss.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/yolo_loss.cpython-37.pyc
new file mode 100644
index 000000000..18a2ae010
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/__pycache__/yolo_loss.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/ctfocal_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/ctfocal_loss.py
new file mode 100644
index 000000000..dd00eb854
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/ctfocal_loss.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+
+from ppdet.core.workspace import register, serializable
+
+__all__ = ['CTFocalLoss']
+
+
+@register
+@serializable
+class CTFocalLoss(object):
+ """
+ CTFocalLoss: CornerNet & CenterNet Focal Loss
+ Args:
+ loss_weight (float): loss weight
+ gamma (float): gamma parameter for Focal Loss
+ """
+
+ def __init__(self, loss_weight=1., gamma=2.0):
+ self.loss_weight = loss_weight
+ self.gamma = gamma
+
+ def __call__(self, pred, target):
+ """
+ Calculate the loss
+ Args:
+ pred (Tensor): heatmap prediction
+ target (Tensor): target for positive samples
+ Return:
+ ct_focal_loss (Tensor): Focal Loss used in CornerNet & CenterNet.
+                Note that the values in target are in [0, 1], since a Gaussian
+                is used to soften the penalty and every value in [0, 1) is
+                treated as a negative example.
+ """
+ fg_map = paddle.cast(target == 1, 'float32')
+ fg_map.stop_gradient = True
+ bg_map = paddle.cast(target < 1, 'float32')
+ bg_map.stop_gradient = True
+
+ neg_weights = paddle.pow(1 - target, 4)
+ pos_loss = 0 - paddle.log(pred) * paddle.pow(1 - pred,
+ self.gamma) * fg_map
+
+ neg_loss = 0 - paddle.log(1 - pred) * paddle.pow(
+ pred, self.gamma) * neg_weights * bg_map
+ pos_loss = paddle.sum(pos_loss)
+ neg_loss = paddle.sum(neg_loss)
+
+ fg_num = paddle.sum(fg_map)
+ ct_focal_loss = (pos_loss + neg_loss) / (
+ fg_num + paddle.cast(fg_num == 0, 'float32'))
+ return ct_focal_loss * self.loss_weight
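+
+
+# Editor's illustrative sketch (not part of the original module): positives
+# (target == 1) are weighted by (1 - pred)^gamma, while gaussian "near
+# positives" in [0, 1) are softened by the (1 - target)^4 factor instead of
+# being punished as full negatives.
+def _example_ct_focal_loss():
+    loss_fn = CTFocalLoss()
+    pred = paddle.to_tensor([[0.9, 0.2, 0.1]])
+    target = paddle.to_tensor([[1.0, 0.8, 0.0]])  # positive, gaussian tail, bg
+    return loss_fn(pred, target)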
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/detr_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/detr_loss.py
new file mode 100644
index 000000000..5a589d4a2
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/detr_loss.py
@@ -0,0 +1,230 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register
+from .iou_loss import GIoULoss
+from ..transformers import bbox_cxcywh_to_xyxy, sigmoid_focal_loss
+
+__all__ = ['DETRLoss']
+
+
+@register
+class DETRLoss(nn.Layer):
+ __shared__ = ['num_classes', 'use_focal_loss']
+ __inject__ = ['matcher']
+
+ def __init__(self,
+ num_classes=80,
+ matcher='HungarianMatcher',
+ loss_coeff={
+ 'class': 1,
+ 'bbox': 5,
+ 'giou': 2,
+ 'no_object': 0.1,
+ 'mask': 1,
+ 'dice': 1
+ },
+ aux_loss=True,
+ use_focal_loss=False):
+ r"""
+ Args:
+ num_classes (int): The number of classes.
+ matcher (HungarianMatcher): It computes an assignment between the targets
+ and the predictions of the network.
+            loss_coeff (dict): The coefficient of each loss term.
+            aux_loss (bool): If True, the loss at each decoder layer is used as well.
+ use_focal_loss (bool): Use focal loss or not.
+ """
+ super(DETRLoss, self).__init__()
+ self.num_classes = num_classes
+
+ self.matcher = matcher
+ self.loss_coeff = loss_coeff
+ self.aux_loss = aux_loss
+ self.use_focal_loss = use_focal_loss
+
+ if not self.use_focal_loss:
+ self.loss_coeff['class'] = paddle.full([num_classes + 1],
+ loss_coeff['class'])
+ self.loss_coeff['class'][-1] = loss_coeff['no_object']
+ self.giou_loss = GIoULoss()
+
+ def _get_loss_class(self, logits, gt_class, match_indices, bg_index,
+ num_gts):
+ # logits: [b, query, num_classes], gt_class: list[[n, 1]]
+ target_label = paddle.full(logits.shape[:2], bg_index, dtype='int64')
+ bs, num_query_objects = target_label.shape
+ if sum(len(a) for a in gt_class) > 0:
+ index, updates = self._get_index_updates(num_query_objects,
+ gt_class, match_indices)
+ target_label = paddle.scatter(
+ target_label.reshape([-1, 1]), index, updates.astype('int64'))
+ target_label = target_label.reshape([bs, num_query_objects])
+ if self.use_focal_loss:
+ target_label = F.one_hot(target_label,
+ self.num_classes + 1)[:, :, :-1]
+ return {
+ 'loss_class': self.loss_coeff['class'] * sigmoid_focal_loss(
+ logits, target_label, num_gts / num_query_objects)
+ if self.use_focal_loss else F.cross_entropy(
+ logits, target_label, weight=self.loss_coeff['class'])
+ }
+
+ def _get_loss_bbox(self, boxes, gt_bbox, match_indices, num_gts):
+ # boxes: [b, query, 4], gt_bbox: list[[n, 4]]
+ loss = dict()
+ if sum(len(a) for a in gt_bbox) == 0:
+ loss['loss_bbox'] = paddle.to_tensor([0.])
+ loss['loss_giou'] = paddle.to_tensor([0.])
+ return loss
+
+ src_bbox, target_bbox = self._get_src_target_assign(boxes, gt_bbox,
+ match_indices)
+ loss['loss_bbox'] = self.loss_coeff['bbox'] * F.l1_loss(
+ src_bbox, target_bbox, reduction='sum') / num_gts
+ loss['loss_giou'] = self.giou_loss(
+ bbox_cxcywh_to_xyxy(src_bbox), bbox_cxcywh_to_xyxy(target_bbox))
+ loss['loss_giou'] = loss['loss_giou'].sum() / num_gts
+ loss['loss_giou'] = self.loss_coeff['giou'] * loss['loss_giou']
+ return loss
+
+ def _get_loss_mask(self, masks, gt_mask, match_indices, num_gts):
+ # masks: [b, query, h, w], gt_mask: list[[n, H, W]]
+ loss = dict()
+ if sum(len(a) for a in gt_mask) == 0:
+ loss['loss_mask'] = paddle.to_tensor([0.])
+ loss['loss_dice'] = paddle.to_tensor([0.])
+ return loss
+
+ src_masks, target_masks = self._get_src_target_assign(masks, gt_mask,
+ match_indices)
+ src_masks = F.interpolate(
+ src_masks.unsqueeze(0),
+ size=target_masks.shape[-2:],
+ mode="bilinear")[0]
+ loss['loss_mask'] = self.loss_coeff['mask'] * F.sigmoid_focal_loss(
+ src_masks,
+ target_masks,
+ paddle.to_tensor(
+ [num_gts], dtype='float32'))
+ loss['loss_dice'] = self.loss_coeff['dice'] * self._dice_loss(
+ src_masks, target_masks, num_gts)
+ return loss
+
+ def _dice_loss(self, inputs, targets, num_gts):
+ inputs = F.sigmoid(inputs)
+ inputs = inputs.flatten(1)
+ targets = targets.flatten(1)
+ numerator = 2 * (inputs * targets).sum(1)
+ denominator = inputs.sum(-1) + targets.sum(-1)
+ loss = 1 - (numerator + 1) / (denominator + 1)
+ return loss.sum() / num_gts
+
+ def _get_loss_aux(self, boxes, logits, gt_bbox, gt_class, bg_index,
+ num_gts):
+ loss_class = []
+ loss_bbox = []
+ loss_giou = []
+ for aux_boxes, aux_logits in zip(boxes, logits):
+ match_indices = self.matcher(aux_boxes, aux_logits, gt_bbox,
+ gt_class)
+ loss_class.append(
+ self._get_loss_class(aux_logits, gt_class, match_indices,
+ bg_index, num_gts)['loss_class'])
+ loss_ = self._get_loss_bbox(aux_boxes, gt_bbox, match_indices,
+ num_gts)
+ loss_bbox.append(loss_['loss_bbox'])
+ loss_giou.append(loss_['loss_giou'])
+ loss = {
+ 'loss_class_aux': paddle.add_n(loss_class),
+ 'loss_bbox_aux': paddle.add_n(loss_bbox),
+ 'loss_giou_aux': paddle.add_n(loss_giou)
+ }
+ return loss
+
+ def _get_index_updates(self, num_query_objects, target, match_indices):
+ batch_idx = paddle.concat([
+ paddle.full_like(src, i) for i, (src, _) in enumerate(match_indices)
+ ])
+ src_idx = paddle.concat([src for (src, _) in match_indices])
+ src_idx += (batch_idx * num_query_objects)
+ target_assign = paddle.concat([
+ paddle.gather(
+ t, dst, axis=0) for t, (_, dst) in zip(target, match_indices)
+ ])
+ return src_idx, target_assign
+
+ def _get_src_target_assign(self, src, target, match_indices):
+ src_assign = paddle.concat([
+ paddle.gather(
+ t, I, axis=0) if len(I) > 0 else paddle.zeros([0, t.shape[-1]])
+ for t, (I, _) in zip(src, match_indices)
+ ])
+ target_assign = paddle.concat([
+ paddle.gather(
+ t, J, axis=0) if len(J) > 0 else paddle.zeros([0, t.shape[-1]])
+ for t, (_, J) in zip(target, match_indices)
+ ])
+ return src_assign, target_assign
+
+ def forward(self,
+ boxes,
+ logits,
+ gt_bbox,
+ gt_class,
+ masks=None,
+ gt_mask=None):
+ r"""
+ Args:
+ boxes (Tensor): [l, b, query, 4]
+ logits (Tensor): [l, b, query, num_classes]
+ gt_bbox (List(Tensor)): list[[n, 4]]
+ gt_class (List(Tensor)): list[[n, 1]]
+ masks (Tensor, optional): [b, query, h, w]
+ gt_mask (List(Tensor), optional): list[[n, H, W]]
+ """
+ match_indices = self.matcher(boxes[-1].detach(), logits[-1].detach(),
+ gt_bbox, gt_class)
+ num_gts = sum(len(a) for a in gt_bbox)
+ try:
+ # TODO: Paddle does not have a "paddle.distributed.is_initialized()"
+ num_gts = paddle.to_tensor([num_gts], dtype=paddle.float32)
+ paddle.distributed.all_reduce(num_gts)
+ num_gts = paddle.clip(
+ num_gts / paddle.distributed.get_world_size(), min=1).item()
+ except:
+ num_gts = max(num_gts.item(), 1)
+ total_loss = dict()
+ total_loss.update(
+ self._get_loss_class(logits[-1], gt_class, match_indices,
+ self.num_classes, num_gts))
+ total_loss.update(
+ self._get_loss_bbox(boxes[-1], gt_bbox, match_indices, num_gts))
+ if masks is not None and gt_mask is not None:
+ total_loss.update(
+ self._get_loss_mask(masks, gt_mask, match_indices, num_gts))
+
+ if self.aux_loss:
+ total_loss.update(
+ self._get_loss_aux(boxes[:-1], logits[:-1], gt_bbox, gt_class,
+ self.num_classes, num_gts))
+
+ return total_loss
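+
+
+# Editor's illustrative sketch (not part of the original module): the dice term
+# in _dice_loss compares mask overlap against total mask area; starting from
+# probabilities (sigmoid already applied), near-identical masks give ~0 loss.
+def _example_dice_loss():
+    inputs = paddle.to_tensor([[0.9, 0.9, 0.1, 0.1]])   # predicted probs
+    targets = paddle.to_tensor([[1.0, 1.0, 0.0, 0.0]])
+    numerator = 2 * (inputs * targets).sum(1)            # 3.6
+    denominator = inputs.sum(-1) + targets.sum(-1)       # 4.0
+    return 1 - (numerator + 1) / (denominator + 1)       # 1 - 4.6/5 = 0.08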
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/fairmot_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/fairmot_loss.py
new file mode 100644
index 000000000..e24ff33fe
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/fairmot_loss.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+from paddle.nn.initializer import Constant
+from ppdet.core.workspace import register
+
+__all__ = ['FairMOTLoss']
+
+
+@register
+class FairMOTLoss(nn.Layer):
+ def __init__(self):
+ super(FairMOTLoss, self).__init__()
+ self.det_weight = self.create_parameter(
+ shape=[1], default_initializer=Constant(-1.85))
+ self.reid_weight = self.create_parameter(
+ shape=[1], default_initializer=Constant(-1.05))
+
+ def forward(self, det_loss, reid_loss):
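+        # Uncertainty-style task weighting (cf. Kendall et al., CVPR 2018): each
+        # task loss is scaled by exp(-s) and regularized by s, where s is a
+        # learned per-task log-variance (det_weight, reid_weight).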
+ loss = paddle.exp(-self.det_weight) * det_loss + paddle.exp(
+ -self.reid_weight) * reid_loss + (self.det_weight + self.reid_weight
+ )
+ loss *= 0.5
+ return {'loss': loss}
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/fcos_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/fcos_loss.py
new file mode 100644
index 000000000..c8d600573
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/fcos_loss.py
@@ -0,0 +1,225 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register
+from ppdet.modeling import ops
+
+__all__ = ['FCOSLoss']
+
+
+def flatten_tensor(inputs, channel_first=False):
+ """
+ Flatten a Tensor
+ Args:
+ inputs (Tensor): 4-D Tensor with shape [N, C, H, W] or [N, H, W, C]
+        channel_first (bool): If True, the dimension order of the input is
+            [N, C, H, W]; otherwise it is [N, H, W, C]
+ Return:
+ output_channel_last (Tensor): The flattened Tensor in channel_last style
+ """
+ if channel_first:
+ input_channel_last = paddle.transpose(inputs, perm=[0, 2, 3, 1])
+ else:
+ input_channel_last = inputs
+ output_channel_last = paddle.flatten(
+ input_channel_last, start_axis=0, stop_axis=2)
+ return output_channel_last
+
+
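+# Editor's illustrative sketch (not part of the original module): flattening an
+# [N, C, H, W] map moves channels last and merges the leading axes, so each row
+# is one spatial location's C-dimensional prediction.
+def _example_flatten_tensor():
+    x = paddle.rand([2, 80, 4, 4])  # [N, C, H, W]
+    return flatten_tensor(x, channel_first=True).shape  # [32, 80], 32 = 2*4*4
+
+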
+@register
+class FCOSLoss(nn.Layer):
+ """
+ FCOSLoss
+ Args:
+ loss_alpha (float): alpha in focal loss
+ loss_gamma (float): gamma in focal loss
+ iou_loss_type (str): location loss type, IoU/GIoU/LINEAR_IoU
+ reg_weights (float): weight for location loss
+ """
+
+ def __init__(self,
+ loss_alpha=0.25,
+ loss_gamma=2.0,
+ iou_loss_type="giou",
+ reg_weights=1.0):
+ super(FCOSLoss, self).__init__()
+ self.loss_alpha = loss_alpha
+ self.loss_gamma = loss_gamma
+ self.iou_loss_type = iou_loss_type
+ self.reg_weights = reg_weights
+
+ def __iou_loss(self, pred, targets, positive_mask, weights=None):
+ """
+ Calculate the loss for location prediction
+ Args:
+ pred (Tensor): bounding boxes prediction
+ targets (Tensor): targets for positive samples
+ positive_mask (Tensor): mask of positive samples
+ weights (Tensor): weights for each positive samples
+ Return:
+ loss (Tensor): location loss
+ """
+ plw = pred[:, 0] * positive_mask
+ pth = pred[:, 1] * positive_mask
+ prw = pred[:, 2] * positive_mask
+ pbh = pred[:, 3] * positive_mask
+
+ tlw = targets[:, 0] * positive_mask
+ tth = targets[:, 1] * positive_mask
+ trw = targets[:, 2] * positive_mask
+ tbh = targets[:, 3] * positive_mask
+ tlw.stop_gradient = True
+ trw.stop_gradient = True
+ tth.stop_gradient = True
+ tbh.stop_gradient = True
+
+ ilw = paddle.minimum(plw, tlw)
+ irw = paddle.minimum(prw, trw)
+ ith = paddle.minimum(pth, tth)
+ ibh = paddle.minimum(pbh, tbh)
+
+ clw = paddle.maximum(plw, tlw)
+ crw = paddle.maximum(prw, trw)
+ cth = paddle.maximum(pth, tth)
+ cbh = paddle.maximum(pbh, tbh)
+
+ area_predict = (plw + prw) * (pth + pbh)
+ area_target = (tlw + trw) * (tth + tbh)
+ area_inter = (ilw + irw) * (ith + ibh)
+ ious = (area_inter + 1.0) / (
+ area_predict + area_target - area_inter + 1.0)
+ ious = ious * positive_mask
+
+ if self.iou_loss_type.lower() == "linear_iou":
+ loss = 1.0 - ious
+ elif self.iou_loss_type.lower() == "giou":
+ area_uniou = area_predict + area_target - area_inter
+ area_circum = (clw + crw) * (cth + cbh) + 1e-7
+ giou = ious - (area_circum - area_uniou) / area_circum
+ loss = 1.0 - giou
+ elif self.iou_loss_type.lower() == "iou":
+ loss = 0.0 - paddle.log(ious)
+ else:
+ raise KeyError
+ if weights is not None:
+ loss = loss * weights
+ return loss
+
+ def forward(self, cls_logits, bboxes_reg, centerness, tag_labels,
+ tag_bboxes, tag_center):
+ """
+ Calculate the loss for classification, location and centerness
+ Args:
+ cls_logits (list): list of Tensor, which is predicted
+ score for all anchor points with shape [N, M, C]
+ bboxes_reg (list): list of Tensor, which is predicted
+ offsets for all anchor points with shape [N, M, 4]
+ centerness (list): list of Tensor, which is predicted
+ centerness for all anchor points with shape [N, M, 1]
+ tag_labels (list): list of Tensor, which is category
+ targets for each anchor point
+ tag_bboxes (list): list of Tensor, which is bounding
+ boxes targets for positive samples
+ tag_center (list): list of Tensor, which is centerness
+ targets for positive samples
+ Return:
+            loss (dict): loss composed of the classification loss, the bounding
+                box regression loss and the centerness loss
+ """
+ cls_logits_flatten_list = []
+ bboxes_reg_flatten_list = []
+ centerness_flatten_list = []
+ tag_labels_flatten_list = []
+ tag_bboxes_flatten_list = []
+ tag_center_flatten_list = []
+ num_lvl = len(cls_logits)
+ for lvl in range(num_lvl):
+ cls_logits_flatten_list.append(
+ flatten_tensor(cls_logits[lvl], True))
+ bboxes_reg_flatten_list.append(
+ flatten_tensor(bboxes_reg[lvl], True))
+ centerness_flatten_list.append(
+ flatten_tensor(centerness[lvl], True))
+
+ tag_labels_flatten_list.append(
+ flatten_tensor(tag_labels[lvl], False))
+ tag_bboxes_flatten_list.append(
+ flatten_tensor(tag_bboxes[lvl], False))
+ tag_center_flatten_list.append(
+ flatten_tensor(tag_center[lvl], False))
+
+ cls_logits_flatten = paddle.concat(cls_logits_flatten_list, axis=0)
+ bboxes_reg_flatten = paddle.concat(bboxes_reg_flatten_list, axis=0)
+ centerness_flatten = paddle.concat(centerness_flatten_list, axis=0)
+
+ tag_labels_flatten = paddle.concat(tag_labels_flatten_list, axis=0)
+ tag_bboxes_flatten = paddle.concat(tag_bboxes_flatten_list, axis=0)
+ tag_center_flatten = paddle.concat(tag_center_flatten_list, axis=0)
+ tag_labels_flatten.stop_gradient = True
+ tag_bboxes_flatten.stop_gradient = True
+ tag_center_flatten.stop_gradient = True
+
+ mask_positive_bool = tag_labels_flatten > 0
+ mask_positive_bool.stop_gradient = True
+ mask_positive_float = paddle.cast(mask_positive_bool, dtype="float32")
+ mask_positive_float.stop_gradient = True
+
+ num_positive_fp32 = paddle.sum(mask_positive_float)
+ num_positive_fp32.stop_gradient = True
+ num_positive_int32 = paddle.cast(num_positive_fp32, dtype="int32")
+ num_positive_int32 = num_positive_int32 * 0 + 1
+ num_positive_int32.stop_gradient = True
+
+ normalize_sum = paddle.sum(tag_center_flatten * mask_positive_float)
+ normalize_sum.stop_gradient = True
+
+ # 1. cls_logits: sigmoid_focal_loss
+ # expand onehot labels
+ num_classes = cls_logits_flatten.shape[-1]
+ tag_labels_flatten = paddle.squeeze(tag_labels_flatten, axis=-1)
+ tag_labels_flatten_bin = F.one_hot(
+ tag_labels_flatten, num_classes=1 + num_classes)
+ tag_labels_flatten_bin = tag_labels_flatten_bin[:, 1:]
+ # sigmoid_focal_loss
+ cls_loss = F.sigmoid_focal_loss(
+ cls_logits_flatten, tag_labels_flatten_bin) / num_positive_fp32
+
+ # 2. bboxes_reg: giou_loss
+ mask_positive_float = paddle.squeeze(mask_positive_float, axis=-1)
+ tag_center_flatten = paddle.squeeze(tag_center_flatten, axis=-1)
+ reg_loss = self.__iou_loss(
+ bboxes_reg_flatten,
+ tag_bboxes_flatten,
+ mask_positive_float,
+ weights=tag_center_flatten)
+ reg_loss = reg_loss * mask_positive_float / normalize_sum
+
+ # 3. centerness: sigmoid_cross_entropy_with_logits_loss
+ centerness_flatten = paddle.squeeze(centerness_flatten, axis=-1)
+ ctn_loss = ops.sigmoid_cross_entropy_with_logits(centerness_flatten,
+ tag_center_flatten)
+ ctn_loss = ctn_loss * mask_positive_float / num_positive_fp32
+
+ loss_all = {
+ "loss_centerness": paddle.sum(ctn_loss),
+ "loss_cls": paddle.sum(cls_loss),
+ "loss_box": paddle.sum(reg_loss)
+ }
+ return loss_all
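+
+
+# Editor's illustrative sketch (not part of the original module): in the ltrb
+# encoding used by __iou_loss, a box is given by its (left, top, right, bottom)
+# distances from the anchor point, so widths, heights and intersections are
+# sums of paired distances.
+def _example_ltrb_iou():
+    pred = paddle.to_tensor([10., 10., 10., 10.])    # a 20 x 20 box
+    target = paddle.to_tensor([10., 10., 20., 20.])  # a 30 x 30 box
+    iw = paddle.minimum(pred[0], target[0]) + paddle.minimum(pred[2], target[2])
+    ih = paddle.minimum(pred[1], target[1]) + paddle.minimum(pred[3], target[3])
+    inter = iw * ih                                             # 20 * 20 = 400
+    area_p = (pred[0] + pred[2]) * (pred[1] + pred[3])          # 400
+    area_t = (target[0] + target[2]) * (target[1] + target[3])  # 900
+    return inter / (area_p + area_t - inter)                    # 400/900 ~= 0.44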
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/gfocal_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/gfocal_loss.py
new file mode 100644
index 000000000..37e27f084
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/gfocal_loss.py
@@ -0,0 +1,217 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The code is based on:
+# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/gfocal_loss.py
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import numpy as np
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register, serializable
+from ppdet.modeling import ops
+
+__all__ = ['QualityFocalLoss', 'DistributionFocalLoss']
+
+
+def quality_focal_loss(pred, target, beta=2.0, use_sigmoid=True):
+ """
+ Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
+ Qualified and Distributed Bounding Boxes for Dense Object Detection
+    <https://arxiv.org/abs/2006.04388>`_.
+ Args:
+ pred (Tensor): Predicted joint representation of classification
+ and quality (IoU) estimation with shape (N, C), C is the number of
+ classes.
+ target (tuple([Tensor])): Target category label with shape (N,)
+ and target quality label with shape (N,).
+ beta (float): The beta parameter for calculating the modulating factor.
+ Defaults to 2.0.
+ Returns:
+ Tensor: Loss tensor with shape (N,).
+ """
+ assert len(target) == 2, """target for QFL must be a tuple of two elements,
+ including category label and quality label, respectively"""
+ # label denotes the category id, score denotes the quality score
+ label, score = target
+ if use_sigmoid:
+ func = F.binary_cross_entropy_with_logits
+ else:
+ func = F.binary_cross_entropy
+
+ # negatives are supervised by 0 quality score
+ pred_sigmoid = F.sigmoid(pred) if use_sigmoid else pred
+ scale_factor = pred_sigmoid
+ zerolabel = paddle.zeros(pred.shape, dtype='float32')
+ loss = func(pred, zerolabel, reduction='none') * scale_factor.pow(beta)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = pred.shape[1]
+ pos = paddle.logical_and((label >= 0),
+ (label < bg_class_ind)).nonzero().squeeze(1)
+ if pos.shape[0] == 0:
+ return loss.sum(axis=1)
+ pos_label = paddle.gather(label, pos, axis=0)
+ pos_mask = np.zeros(pred.shape, dtype=np.int32)
+ pos_mask[pos.numpy(), pos_label.numpy()] = 1
+ pos_mask = paddle.to_tensor(pos_mask, dtype='bool')
+ score = score.unsqueeze(-1).expand([-1, pred.shape[1]]).cast('float32')
+ # positives are supervised by bbox quality (IoU) score
+ scale_factor_new = score - pred_sigmoid
+
+ loss_pos = func(
+ pred, score, reduction='none') * scale_factor_new.abs().pow(beta)
+ loss = loss * paddle.logical_not(pos_mask) + loss_pos * pos_mask
+ loss = loss.sum(axis=1)
+ return loss
+
+
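+# Editor's illustrative sketch (not part of the original module): QFL's
+# modulating factor |score - sigmoid(pred)|^beta shrinks as the predicted
+# quality approaches its target, whether that target is 0 (negatives) or the
+# IoU score (positives).
+def _example_qfl_modulation(beta=2.0):
+    pred_sigmoid = paddle.to_tensor([0.3, 0.7])
+    quality_target = paddle.to_tensor([0.0, 0.8])  # a negative, a positive
+    bce = F.binary_cross_entropy(pred_sigmoid, quality_target, reduction='none')
+    return bce * (quality_target - pred_sigmoid).abs().pow(beta)
+
+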
+def distribution_focal_loss(pred, label):
+ """Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
+ Qualified and Distributed Bounding Boxes for Dense Object Detection
+    <https://arxiv.org/abs/2006.04388>`_.
+ Args:
+ pred (Tensor): Predicted general distribution of bounding boxes
+ (before softmax) with shape (N, n+1), n is the max value of the
+ integral set `{0, ..., n}` in paper.
+ label (Tensor): Target distance label for bounding boxes with
+ shape (N,).
+ Returns:
+ Tensor: Loss tensor with shape (N,).
+ """
+ dis_left = label.cast('int64')
+ dis_right = dis_left + 1
+ weight_left = dis_right.cast('float32') - label
+ weight_right = label - dis_left.cast('float32')
+ loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
+ + F.cross_entropy(pred, dis_right, reduction='none') * weight_right
+ return loss
+
+
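+# Editor's illustrative sketch (not part of the original module): DFL supervises
+# a fractional distance label such as 2.3 as a mixture of its two neighboring
+# integer bins, weighting bin 2 by 0.7 and bin 3 by 0.3.
+def _example_dfl():
+    pred = paddle.rand([1, 8])      # logits over the integral set {0, ..., 7}
+    label = paddle.to_tensor([2.3])
+    return distribution_focal_loss(pred, label)
+
+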
+@register
+@serializable
+class QualityFocalLoss(nn.Layer):
+ r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
+ Learning Qualified and Distributed Bounding Boxes for Dense Object
+    Detection <https://arxiv.org/abs/2006.04388>`_.
+ Args:
+ use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
+ Defaults to True.
+ beta (float): The beta parameter for calculating the modulating factor.
+ Defaults to 2.0.
+ reduction (str): Options are "none", "mean" and "sum".
+ loss_weight (float): Loss weight of current loss.
+ """
+
+ def __init__(self,
+ use_sigmoid=True,
+ beta=2.0,
+ reduction='mean',
+ loss_weight=1.0):
+ super(QualityFocalLoss, self).__init__()
+ self.use_sigmoid = use_sigmoid
+ self.beta = beta
+ assert reduction in ('none', 'mean', 'sum')
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self, pred, target, weight=None, avg_factor=None):
+ """Forward function.
+ Args:
+ pred (Tensor): Predicted joint representation of
+ classification and quality (IoU) estimation with shape (N, C),
+ C is the number of classes.
+ target (tuple([Tensor])): Target category label with shape
+ (N,) and target quality label with shape (N,).
+ weight (Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ """
+
+ loss = self.loss_weight * quality_focal_loss(
+ pred, target, beta=self.beta, use_sigmoid=self.use_sigmoid)
+
+ if weight is not None:
+ loss = loss * weight
+ if avg_factor is None:
+ if self.reduction == 'none':
+ return loss
+ elif self.reduction == 'mean':
+ return loss.mean()
+ elif self.reduction == 'sum':
+ return loss.sum()
+ else:
+ # if reduction is mean, then average the loss by avg_factor
+ if self.reduction == 'mean':
+ loss = loss.sum() / avg_factor
+ # if reduction is 'none', then do nothing, otherwise raise an error
+ elif self.reduction != 'none':
+ raise ValueError(
+ 'avg_factor can not be used with reduction="sum"')
+ return loss
+
+
+@register
+@serializable
+class DistributionFocalLoss(nn.Layer):
+ """Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
+ Learning Qualified and Distributed Bounding Boxes for Dense Object
+    Detection <https://arxiv.org/abs/2006.04388>`_.
+ Args:
+ reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
+ loss_weight (float): Loss weight of current loss.
+ """
+
+ def __init__(self, reduction='mean', loss_weight=1.0):
+ super(DistributionFocalLoss, self).__init__()
+ assert reduction in ('none', 'mean', 'sum')
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self, pred, target, weight=None, avg_factor=None):
+ """Forward function.
+ Args:
+ pred (Tensor): Predicted general distribution of bounding
+ boxes (before softmax) with shape (N, n+1), n is the max value
+ of the integral set `{0, ..., n}` in paper.
+ target (Tensor): Target distance label for bounding boxes
+ with shape (N,).
+ weight (Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ """
+ loss = self.loss_weight * distribution_focal_loss(pred, target)
+ if weight is not None:
+ loss = loss * weight
+ if avg_factor is None:
+ if self.reduction == 'none':
+ return loss
+ elif self.reduction == 'mean':
+ return loss.mean()
+ elif self.reduction == 'sum':
+ return loss.sum()
+ else:
+ # if reduction is mean, then average the loss by avg_factor
+ if self.reduction == 'mean':
+ loss = loss.sum() / avg_factor
+ # if reduction is 'none', then do nothing, otherwise raise an error
+ elif self.reduction != 'none':
+ raise ValueError(
+ 'avg_factor can not be used with reduction="sum"')
+ return loss
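+
+
+if __name__ == '__main__':
+    # Editorial usage sketch, not part of upstream ppdet: a continuous label y
+    # between integer bins l and l+1 is supervised with cross-entropy on both
+    # bins, weighted (l+1-y) and (y-l); e.g. y=2.4 gives weights 0.6 and 0.4.
+    import paddle
+    pred = paddle.randn([4, 8])  # 4 boxes, integral set {0, ..., 7}
+    label = paddle.to_tensor([2.4, 0.1, 6.9, 3.0])
+    print(distribution_focal_loss(pred, label).shape)  # [4]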
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/iou_aware_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/iou_aware_loss.py
new file mode 100644
index 000000000..4a9e904dd
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/iou_aware_loss.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle.nn.functional as F
+from ppdet.core.workspace import register, serializable
+from .iou_loss import IouLoss
+from ..bbox_utils import bbox_iou
+
+
+@register
+@serializable
+class IouAwareLoss(IouLoss):
+ """
+ iou aware loss, see https://arxiv.org/abs/1912.05992
+ Args:
+ loss_weight (float): iou aware loss weight, default is 1.0
+ max_height (int): max height of input to support random shape input
+ max_width (int): max width of input to support random shape input
+ """
+
+ def __init__(self, loss_weight=1.0, giou=False, diou=False, ciou=False):
+ super(IouAwareLoss, self).__init__(
+ loss_weight=loss_weight, giou=giou, diou=diou, ciou=ciou)
+
+ def __call__(self, ioup, pbox, gbox):
+ iou = bbox_iou(
+ pbox, gbox, giou=self.giou, diou=self.diou, ciou=self.ciou)
+ iou.stop_gradient = True
+ loss_iou_aware = F.binary_cross_entropy_with_logits(
+ ioup, iou, reduction='none')
+ loss_iou_aware = loss_iou_aware * self.loss_weight
+ return loss_iou_aware
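+
+
+# Editorial note, not part of upstream ppdet: `ioup` holds raw logits from the
+# IoU-prediction branch, while the detached `bbox_iou(pbox, gbox)` serves as
+# its regression target, so gradients only flow through the branch itself.
+# A hedged usage sketch, assuming pbox/gbox follow the box-tensor layout that
+# `bbox_iou` expects in this codebase:
+#
+#     loss_fn = IouAwareLoss(loss_weight=1.0)
+#     loss_iou_aware = loss_fn(ioup, pbox, gbox)  # per-location BCE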
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/iou_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/iou_loss.py
new file mode 100644
index 000000000..9b8da6c05
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/iou_loss.py
@@ -0,0 +1,210 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+import paddle
+
+from ppdet.core.workspace import register, serializable
+from ..bbox_utils import bbox_iou
+
+__all__ = ['IouLoss', 'GIoULoss', 'DIouLoss']
+
+
+@register
+@serializable
+class IouLoss(object):
+ """
+ iou loss, see https://arxiv.org/abs/1908.03851
+ loss = 1.0 - iou * iou
+ Args:
+ loss_weight (float): iou loss weight, default is 2.5
+ max_height (int): max height of input to support random shape input
+ max_width (int): max width of input to support random shape input
+ ciou_term (bool): whether to add ciou_term
+ loss_square (bool): whether to square the iou term
+ """
+
+ def __init__(self,
+ loss_weight=2.5,
+ giou=False,
+ diou=False,
+ ciou=False,
+ loss_square=True):
+ self.loss_weight = loss_weight
+ self.giou = giou
+ self.diou = diou
+ self.ciou = ciou
+ self.loss_square = loss_square
+
+ def __call__(self, pbox, gbox):
+ iou = bbox_iou(
+ pbox, gbox, giou=self.giou, diou=self.diou, ciou=self.ciou)
+ if self.loss_square:
+ loss_iou = 1 - iou * iou
+ else:
+ loss_iou = 1 - iou
+
+ loss_iou = loss_iou * self.loss_weight
+ return loss_iou
+
+
+@register
+@serializable
+class GIoULoss(object):
+ """
+ Generalized Intersection over Union, see https://arxiv.org/abs/1902.09630
+ Args:
+        loss_weight (float): giou loss weight, defaults to 1.0
+        eps (float): epsilon to avoid divide by zero, defaults to 1e-10
+        reduction (str): Options are "none", "mean" and "sum", defaults to "none"
+ """
+
+ def __init__(self, loss_weight=1., eps=1e-10, reduction='none'):
+ self.loss_weight = loss_weight
+ self.eps = eps
+ assert reduction in ('none', 'mean', 'sum')
+ self.reduction = reduction
+
+ def bbox_overlap(self, box1, box2, eps=1e-10):
+ """calculate the iou of box1 and box2
+ Args:
+ box1 (Tensor): box1 with the shape (..., 4)
+            box2 (Tensor): box2 with the shape (..., 4)
+ eps (float): epsilon to avoid divide by zero
+ Return:
+ iou (Tensor): iou of box1 and box2
+ overlap (Tensor): overlap of box1 and box2
+ union (Tensor): union of box1 and box2
+ """
+ x1, y1, x2, y2 = box1
+ x1g, y1g, x2g, y2g = box2
+
+ xkis1 = paddle.maximum(x1, x1g)
+ ykis1 = paddle.maximum(y1, y1g)
+ xkis2 = paddle.minimum(x2, x2g)
+ ykis2 = paddle.minimum(y2, y2g)
+ w_inter = (xkis2 - xkis1).clip(0)
+ h_inter = (ykis2 - ykis1).clip(0)
+ overlap = w_inter * h_inter
+
+ area1 = (x2 - x1) * (y2 - y1)
+ area2 = (x2g - x1g) * (y2g - y1g)
+ union = area1 + area2 - overlap + eps
+ iou = overlap / union
+
+ return iou, overlap, union
+
+ def __call__(self, pbox, gbox, iou_weight=1., loc_reweight=None):
+ x1, y1, x2, y2 = paddle.split(pbox, num_or_sections=4, axis=-1)
+ x1g, y1g, x2g, y2g = paddle.split(gbox, num_or_sections=4, axis=-1)
+ box1 = [x1, y1, x2, y2]
+ box2 = [x1g, y1g, x2g, y2g]
+ iou, overlap, union = self.bbox_overlap(box1, box2, self.eps)
+ xc1 = paddle.minimum(x1, x1g)
+ yc1 = paddle.minimum(y1, y1g)
+ xc2 = paddle.maximum(x2, x2g)
+ yc2 = paddle.maximum(y2, y2g)
+
+ area_c = (xc2 - xc1) * (yc2 - yc1) + self.eps
+ miou = iou - ((area_c - union) / area_c)
+ if loc_reweight is not None:
+ loc_reweight = paddle.reshape(loc_reweight, shape=(-1, 1))
+ loc_thresh = 0.9
+ giou = 1 - (1 - loc_thresh
+ ) * miou - loc_thresh * miou * loc_reweight
+ else:
+ giou = 1 - miou
+ if self.reduction == 'none':
+ loss = giou
+ elif self.reduction == 'sum':
+ loss = paddle.sum(giou * iou_weight)
+ else:
+ loss = paddle.mean(giou * iou_weight)
+ return loss * self.loss_weight
+
+
+@register
+@serializable
+class DIouLoss(GIoULoss):
+ """
+ Distance-IoU Loss, see https://arxiv.org/abs/1911.08287
+ Args:
+        loss_weight (float): diou loss weight, default as 1
+ eps (float): epsilon to avoid divide by zero, default as 1e-10
+ use_complete_iou_loss (bool): whether to use complete iou loss
+ """
+
+ def __init__(self, loss_weight=1., eps=1e-10, use_complete_iou_loss=True):
+ super(DIouLoss, self).__init__(loss_weight=loss_weight, eps=eps)
+ self.use_complete_iou_loss = use_complete_iou_loss
+
+ def __call__(self, pbox, gbox, iou_weight=1.):
+ x1, y1, x2, y2 = paddle.split(pbox, num_or_sections=4, axis=-1)
+ x1g, y1g, x2g, y2g = paddle.split(gbox, num_or_sections=4, axis=-1)
+ cx = (x1 + x2) / 2
+ cy = (y1 + y2) / 2
+ w = x2 - x1
+ h = y2 - y1
+
+ cxg = (x1g + x2g) / 2
+ cyg = (y1g + y2g) / 2
+ wg = x2g - x1g
+ hg = y2g - y1g
+
+ x2 = paddle.maximum(x1, x2)
+ y2 = paddle.maximum(y1, y2)
+
+ # A and B
+ xkis1 = paddle.maximum(x1, x1g)
+ ykis1 = paddle.maximum(y1, y1g)
+ xkis2 = paddle.minimum(x2, x2g)
+ ykis2 = paddle.minimum(y2, y2g)
+
+ # A or B
+ xc1 = paddle.minimum(x1, x1g)
+ yc1 = paddle.minimum(y1, y1g)
+ xc2 = paddle.maximum(x2, x2g)
+ yc2 = paddle.maximum(y2, y2g)
+
+ intsctk = (xkis2 - xkis1) * (ykis2 - ykis1)
+ intsctk = intsctk * paddle.greater_than(
+ xkis2, xkis1) * paddle.greater_than(ykis2, ykis1)
+ unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g
+ ) - intsctk + self.eps
+ iouk = intsctk / unionk
+
+ # DIOU term
+ dist_intersection = (cx - cxg) * (cx - cxg) + (cy - cyg) * (cy - cyg)
+ dist_union = (xc2 - xc1) * (xc2 - xc1) + (yc2 - yc1) * (yc2 - yc1)
+ diou_term = (dist_intersection + self.eps) / (dist_union + self.eps)
+
+ # CIOU term
+ ciou_term = 0
+ if self.use_complete_iou_loss:
+ ar_gt = wg / hg
+ ar_pred = w / h
+ arctan = paddle.atan(ar_gt) - paddle.atan(ar_pred)
+ ar_loss = 4. / np.pi / np.pi * arctan * arctan
+ alpha = ar_loss / (1 - iouk + ar_loss + self.eps)
+ alpha.stop_gradient = True
+ ciou_term = alpha * ar_loss
+
+ diou = paddle.mean((1 - iouk + ciou_term + diou_term) * iou_weight)
+
+ return diou * self.loss_weight
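+
+
+if __name__ == '__main__':
+    # Editorial sanity check, not part of upstream ppdet: identical boxes give
+    # GIoU ~= 1, so the loss 1 - GIoU is ~0; disjoint boxes push the loss
+    # above 1 because the enclosing-box penalty drives GIoU negative.
+    pbox = paddle.to_tensor([[0., 0., 2., 2.], [0., 0., 1., 1.]])
+    gbox = paddle.to_tensor([[0., 0., 2., 2.], [2., 2., 3., 3.]])
+    print(GIoULoss(reduction='none')(pbox, gbox))  # ~[[0.], [1.78]]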
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/jde_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/jde_loss.py
new file mode 100644
index 000000000..5c3b5a615
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/jde_loss.py
@@ -0,0 +1,193 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register
+
+__all__ = ['JDEDetectionLoss', 'JDEEmbeddingLoss', 'JDELoss']
+
+
+@register
+class JDEDetectionLoss(nn.Layer):
+ __shared__ = ['num_classes']
+
+ def __init__(self, num_classes=1, for_mot=True):
+ super(JDEDetectionLoss, self).__init__()
+ self.num_classes = num_classes
+ self.for_mot = for_mot
+
+ def det_loss(self, p_det, anchor, t_conf, t_box):
+ pshape = paddle.shape(p_det)
+ pshape.stop_gradient = True
+ nB, nGh, nGw = pshape[0], pshape[-2], pshape[-1]
+ nA = len(anchor)
+ p_det = paddle.reshape(
+ p_det, [nB, nA, self.num_classes + 5, nGh, nGw]).transpose(
+ (0, 1, 3, 4, 2))
+
+ # 1. loss_conf: cross_entropy
+ p_conf = p_det[:, :, :, :, 4:6]
+ p_conf_flatten = paddle.reshape(p_conf, [-1, 2])
+ t_conf_flatten = t_conf.flatten()
+ t_conf_flatten = paddle.cast(t_conf_flatten, dtype="int64")
+ t_conf_flatten.stop_gradient = True
+ loss_conf = F.cross_entropy(
+ p_conf_flatten, t_conf_flatten, ignore_index=-1, reduction='mean')
+ loss_conf.stop_gradient = False
+
+ # 2. loss_box: smooth_l1_loss
+ p_box = p_det[:, :, :, :, :4]
+ p_box_flatten = paddle.reshape(p_box, [-1, 4])
+ t_box_flatten = paddle.reshape(t_box, [-1, 4])
+ fg_inds = paddle.nonzero(t_conf_flatten > 0).flatten()
+ if fg_inds.numel() > 0:
+ reg_delta = paddle.gather(p_box_flatten, fg_inds)
+ reg_target = paddle.gather(t_box_flatten, fg_inds)
+ else:
+ reg_delta = paddle.to_tensor([0, 0, 0, 0], dtype='float32')
+ reg_delta.stop_gradient = False
+ reg_target = paddle.to_tensor([0, 0, 0, 0], dtype='float32')
+ reg_target.stop_gradient = True
+ loss_box = F.smooth_l1_loss(
+ reg_delta, reg_target, reduction='mean', delta=1.0)
+ loss_box.stop_gradient = False
+
+ return loss_conf, loss_box
+
+ def forward(self, det_outs, targets, anchors):
+ """
+ Args:
+ det_outs (list[Tensor]): output from detection head, each one
+ is a 4-D Tensor with shape [N, C, H, W].
+ targets (dict): contains 'im_id', 'gt_bbox', 'gt_ide', 'image',
+ 'im_shape', 'scale_factor' and 'tbox', 'tconf', 'tide' of
+ each FPN level.
+            anchors (list[list]): anchor settings of the JDE model, with N
+                rows and M columns; N is the number of anchor levels (FPN
+                levels) and M is the number of anchor scales per level.
+ """
+ assert len(det_outs) == len(anchors)
+ loss_confs = []
+ loss_boxes = []
+ for i, (p_det, anchor) in enumerate(zip(det_outs, anchors)):
+ t_conf = targets['tconf{}'.format(i)]
+ t_box = targets['tbox{}'.format(i)]
+
+ loss_conf, loss_box = self.det_loss(p_det, anchor, t_conf, t_box)
+ loss_confs.append(loss_conf)
+ loss_boxes.append(loss_box)
+ if self.for_mot:
+ return {'loss_confs': loss_confs, 'loss_boxes': loss_boxes}
+ else:
+ jde_conf_losses = sum(loss_confs)
+ jde_box_losses = sum(loss_boxes)
+ jde_det_losses = {
+ "loss_conf": jde_conf_losses,
+ "loss_box": jde_box_losses,
+ "loss": jde_conf_losses + jde_box_losses,
+ }
+ return jde_det_losses
+
+
+@register
+class JDEEmbeddingLoss(nn.Layer):
+ def __init__(self, ):
+ super(JDEEmbeddingLoss, self).__init__()
+ self.phony = self.create_parameter(shape=[1], dtype="float32")
+
+ def emb_loss(self, p_ide, t_conf, t_ide, emb_scale, classifier):
+ emb_dim = p_ide.shape[1]
+ p_ide = p_ide.transpose((0, 2, 3, 1))
+ p_ide_flatten = paddle.reshape(p_ide, [-1, emb_dim])
+ mask = t_conf > 0
+ mask = paddle.cast(mask, dtype="int64")
+ mask.stop_gradient = True
+ emb_mask = mask.max(1).flatten()
+ emb_mask_inds = paddle.nonzero(emb_mask > 0).flatten()
+ emb_mask_inds.stop_gradient = True
+        # use max(1) to decide the id; TODO: a more reasonable strategy
+ t_ide_flatten = t_ide.max(1).flatten()
+ t_ide_flatten = paddle.cast(t_ide_flatten, dtype="int64")
+ valid_inds = paddle.nonzero(t_ide_flatten != -1).flatten()
+
+ if emb_mask_inds.numel() == 0 or valid_inds.numel() == 0:
+            # loss_ide = paddle.to_tensor([0])  # would raise an error in backward
+ loss_ide = self.phony * 0 # todo
+ else:
+ embedding = paddle.gather(p_ide_flatten, emb_mask_inds)
+ embedding = emb_scale * F.normalize(embedding)
+ logits = classifier(embedding)
+
+ ide_target = paddle.gather(t_ide_flatten, emb_mask_inds)
+
+ loss_ide = F.cross_entropy(
+ logits, ide_target, ignore_index=-1, reduction='mean')
+ loss_ide.stop_gradient = False
+
+ return loss_ide
+
+ def forward(self, ide_outs, targets, emb_scale, classifier):
+ loss_ides = []
+ for i, p_ide in enumerate(ide_outs):
+ t_conf = targets['tconf{}'.format(i)]
+ t_ide = targets['tide{}'.format(i)]
+
+ loss_ide = self.emb_loss(p_ide, t_conf, t_ide, emb_scale,
+ classifier)
+ loss_ides.append(loss_ide)
+ return loss_ides
+
+
+@register
+class JDELoss(nn.Layer):
+ def __init__(self):
+ super(JDELoss, self).__init__()
+
+ def forward(self, loss_confs, loss_boxes, loss_ides, loss_params_cls,
+ loss_params_reg, loss_params_ide, targets):
+ assert len(loss_confs) == len(loss_boxes) == len(loss_ides)
+ assert len(loss_params_cls) == len(loss_params_reg) == len(
+ loss_params_ide)
+ assert len(loss_confs) == len(loss_params_cls)
+
+ batchsize = targets['gt_bbox'].shape[0]
+ nTargets = paddle.nonzero(paddle.sum(targets['gt_bbox'], axis=2)).shape[
+ 0] / batchsize
+ nTargets = paddle.to_tensor(nTargets, dtype='float32')
+ nTargets.stop_gradient = True
+
+ jde_losses = []
+ for i, (loss_conf, loss_box, loss_ide, l_conf_p, l_box_p,
+ l_ide_p) in enumerate(
+ zip(loss_confs, loss_boxes, loss_ides, loss_params_cls,
+ loss_params_reg, loss_params_ide)):
+
+ jde_loss = l_conf_p(loss_conf) + l_box_p(loss_box) + l_ide_p(
+ loss_ide)
+ jde_losses.append(jde_loss)
+
+ loss_all = {
+ "loss_conf": sum(loss_confs),
+ "loss_box": sum(loss_boxes),
+ "loss_ide": sum(loss_ides),
+ "loss": sum(jde_losses),
+ "nTargets": nTargets,
+ }
+ return loss_all
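+
+
+# Editorial note, not part of upstream ppdet: `loss_params_cls/_reg/_ide` are
+# injected per-FPN-level layers. In the JDE paper they implement learnable
+# task-uncertainty weighting (Kendall et al.), roughly
+#     weighted_loss = exp(-s) * loss + s,  with s a trainable scalar,
+# so detection and embedding terms are balanced automatically during training.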
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/keypoint_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/keypoint_loss.py
new file mode 100644
index 000000000..9c3c113db
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/keypoint_loss.py
@@ -0,0 +1,228 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from itertools import cycle, islice
+from collections import abc
+import paddle
+import paddle.nn as nn
+
+from ppdet.core.workspace import register, serializable
+
+__all__ = ['HrHRNetLoss', 'KeyPointMSELoss']
+
+
+@register
+@serializable
+class KeyPointMSELoss(nn.Layer):
+ def __init__(self, use_target_weight=True, loss_scale=0.5):
+ """
+ KeyPointMSELoss layer
+
+ Args:
+            use_target_weight (bool): whether to use target weight
+            loss_scale (float): scale factor applied to the per-joint MSE,
+                0.5 by default
+        """
+ super(KeyPointMSELoss, self).__init__()
+ self.criterion = nn.MSELoss(reduction='mean')
+ self.use_target_weight = use_target_weight
+ self.loss_scale = loss_scale
+
+ def forward(self, output, records):
+ target = records['target']
+ target_weight = records['target_weight']
+ batch_size = output.shape[0]
+ num_joints = output.shape[1]
+ heatmaps_pred = output.reshape(
+ (batch_size, num_joints, -1)).split(num_joints, 1)
+ heatmaps_gt = target.reshape(
+ (batch_size, num_joints, -1)).split(num_joints, 1)
+ loss = 0
+ for idx in range(num_joints):
+ heatmap_pred = heatmaps_pred[idx].squeeze()
+ heatmap_gt = heatmaps_gt[idx].squeeze()
+ if self.use_target_weight:
+ loss += self.loss_scale * self.criterion(
+ heatmap_pred.multiply(target_weight[:, idx]),
+ heatmap_gt.multiply(target_weight[:, idx]))
+ else:
+ loss += self.loss_scale * self.criterion(heatmap_pred,
+ heatmap_gt)
+ keypoint_losses = dict()
+ keypoint_losses['loss'] = loss / num_joints
+ return keypoint_losses
+
+
+@register
+@serializable
+class HrHRNetLoss(nn.Layer):
+ def __init__(self, num_joints, swahr):
+ """
+ HrHRNetLoss layer
+
+ Args:
+            num_joints (int): number of keypoints
+            swahr (bool): whether to use the scale-adaptive heatmap regression
+                (SWAHR) loss instead of the plain heatmap MSE loss
+        """
+ super(HrHRNetLoss, self).__init__()
+ if swahr:
+ self.heatmaploss = HeatMapSWAHRLoss(num_joints)
+ else:
+ self.heatmaploss = HeatMapLoss()
+ self.aeloss = AELoss()
+ self.ziploss = ZipLoss(
+ [self.heatmaploss, self.heatmaploss, self.aeloss])
+
+ def forward(self, inputs, records):
+ targets = []
+ targets.append([records['heatmap_gt1x'], records['mask_1x']])
+ targets.append([records['heatmap_gt2x'], records['mask_2x']])
+ targets.append(records['tagmap'])
+ keypoint_losses = dict()
+ loss = self.ziploss(inputs, targets)
+ keypoint_losses['heatmap_loss'] = loss[0] + loss[1]
+ keypoint_losses['pull_loss'] = loss[2][0]
+ keypoint_losses['push_loss'] = loss[2][1]
+ keypoint_losses['loss'] = recursive_sum(loss)
+ return keypoint_losses
+
+
+class HeatMapLoss(object):
+ def __init__(self, loss_factor=1.0):
+ super(HeatMapLoss, self).__init__()
+ self.loss_factor = loss_factor
+
+ def __call__(self, preds, targets):
+ heatmap, mask = targets
+ loss = ((preds - heatmap)**2 * mask.cast('float').unsqueeze(1))
+ loss = paddle.clip(loss, min=0, max=2).mean()
+ loss *= self.loss_factor
+ return loss
+
+
+class HeatMapSWAHRLoss(object):
+ def __init__(self, num_joints, loss_factor=1.0):
+ super(HeatMapSWAHRLoss, self).__init__()
+ self.loss_factor = loss_factor
+ self.num_joints = num_joints
+
+ def __call__(self, preds, targets):
+ heatmaps_gt, mask = targets
+ heatmaps_pred = preds[0]
+ scalemaps_pred = preds[1]
+
+ heatmaps_scaled_gt = paddle.where(heatmaps_gt > 0, 0.5 * heatmaps_gt * (
+ 1 + (1 +
+ (scalemaps_pred - 1.) * paddle.log(heatmaps_gt + 1e-10))**2),
+ heatmaps_gt)
+
+ regularizer_loss = paddle.mean(
+ paddle.pow((scalemaps_pred - 1.) * (heatmaps_gt > 0).astype(float),
+ 2))
+        omega = 0.01
+        # thres = 2**(-1/omega), threshold for positive weight
+        hm_weight = heatmaps_scaled_gt**omega * paddle.abs(
+            1 - heatmaps_pred) + paddle.abs(heatmaps_pred) * (
+                1 - heatmaps_scaled_gt**omega)
+
+ loss = (((heatmaps_pred - heatmaps_scaled_gt)**2) *
+ mask.cast('float').unsqueeze(1)) * hm_weight
+ loss = loss.mean()
+ loss = self.loss_factor * (loss + 1.0 * regularizer_loss)
+ return loss
+
+
+class AELoss(object):
+ def __init__(self, pull_factor=0.001, push_factor=0.001):
+ super(AELoss, self).__init__()
+ self.pull_factor = pull_factor
+ self.push_factor = push_factor
+
+ def apply_single(self, pred, tagmap):
+ if tagmap.numpy()[:, :, 3].sum() == 0:
+ return (paddle.zeros([1]), paddle.zeros([1]))
+ nonzero = paddle.nonzero(tagmap[:, :, 3] > 0)
+ if nonzero.shape[0] == 0:
+ return (paddle.zeros([1]), paddle.zeros([1]))
+ p_inds = paddle.unique(nonzero[:, 0])
+ num_person = p_inds.shape[0]
+ if num_person == 0:
+ return (paddle.zeros([1]), paddle.zeros([1]))
+
+ pull = 0
+ tagpull_num = 0
+ embs_all = []
+ person_unvalid = 0
+ for person_idx in p_inds.numpy():
+ valid_single = tagmap[person_idx.item()]
+ validkpts = paddle.nonzero(valid_single[:, 3] > 0)
+ valid_single = paddle.index_select(valid_single, validkpts)
+ emb = paddle.gather_nd(pred, valid_single[:, :3])
+ if emb.shape[0] == 1:
+ person_unvalid += 1
+ mean = paddle.mean(emb, axis=0)
+ embs_all.append(mean)
+ pull += paddle.mean(paddle.pow(emb - mean, 2), axis=0)
+ tagpull_num += emb.shape[0]
+ pull /= max(num_person - person_unvalid, 1)
+ if num_person < 2:
+ return pull, paddle.zeros([1])
+
+ embs_all = paddle.stack(embs_all)
+ A = embs_all.expand([num_person, num_person])
+ B = A.transpose([1, 0])
+ diff = A - B
+
+ diff = paddle.pow(diff, 2)
+ push = paddle.exp(-diff)
+ push = paddle.sum(push) - num_person
+
+ push /= 2 * num_person * (num_person - 1)
+ return pull, push
+
+ def __call__(self, preds, tagmaps):
+ bs = preds.shape[0]
+ losses = [
+ self.apply_single(preds[i:i + 1].squeeze(),
+ tagmaps[i:i + 1].squeeze()) for i in range(bs)
+ ]
+ pull = self.pull_factor * sum(loss[0] for loss in losses) / len(losses)
+ push = self.push_factor * sum(loss[1] for loss in losses) / len(losses)
+ return pull, push
+
+
+class ZipLoss(object):
+ def __init__(self, loss_funcs):
+ super(ZipLoss, self).__init__()
+ self.loss_funcs = loss_funcs
+
+ def __call__(self, inputs, targets):
+ assert len(self.loss_funcs) == len(targets) >= len(inputs)
+
+ def zip_repeat(*args):
+ longest = max(map(len, args))
+ filled = [islice(cycle(x), longest) for x in args]
+ return zip(*filled)
+
+ return tuple(
+ fn(x, y)
+ for x, y, fn in zip_repeat(inputs, targets, self.loss_funcs))
+
+
+def recursive_sum(inputs):
+ if isinstance(inputs, abc.Sequence):
+ return sum([recursive_sum(x) for x in inputs])
+ return inputs
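+
+
+if __name__ == '__main__':
+    # Editorial sketch, not part of upstream ppdet: ZipLoss cycles the shorter
+    # argument list so one loss object can serve several outputs, and
+    # recursive_sum flattens the nested per-loss results into a scalar.
+    zl = ZipLoss([lambda x, y: x + y, lambda x, y: (x * y, x - y)])
+    out = zl((1, 2), (10, 20))  # -> (11, (40, -18))
+    print(recursive_sum(out))   # 11 + 40 - 18 = 33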
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/solov2_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/solov2_loss.py
new file mode 100644
index 000000000..ef97a7707
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/solov2_loss.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn.functional as F
+from ppdet.core.workspace import register, serializable
+
+__all__ = ['SOLOv2Loss']
+
+
+@register
+@serializable
+class SOLOv2Loss(object):
+ """
+ SOLOv2Loss
+ Args:
+ ins_loss_weight (float): Weight of instance loss.
+ focal_loss_gamma (float): Gamma parameter for focal loss.
+ focal_loss_alpha (float): Alpha parameter for focal loss.
+ """
+
+ def __init__(self,
+ ins_loss_weight=3.0,
+ focal_loss_gamma=2.0,
+ focal_loss_alpha=0.25):
+ self.ins_loss_weight = ins_loss_weight
+ self.focal_loss_gamma = focal_loss_gamma
+ self.focal_loss_alpha = focal_loss_alpha
+
+ def _dice_loss(self, input, target):
+ input = paddle.reshape(input, shape=(paddle.shape(input)[0], -1))
+ target = paddle.reshape(target, shape=(paddle.shape(target)[0], -1))
+ a = paddle.sum(input * target, axis=1)
+ b = paddle.sum(input * input, axis=1) + 0.001
+ c = paddle.sum(target * target, axis=1) + 0.001
+ d = (2 * a) / (b + c)
+ return 1 - d
+
+ def __call__(self, ins_pred_list, ins_label_list, cate_preds, cate_labels,
+ num_ins):
+ """
+ Get loss of network of SOLOv2.
+ Args:
+ ins_pred_list (list): Variable list of instance branch output.
+            ins_label_list (list): List of instance labels per batch.
+            cate_preds (list): Concat Variable list of category branch output.
+            cate_labels (list): Concat list of category labels per batch.
+ num_ins (int): Number of positive samples in a mini-batch.
+ Returns:
+ loss_ins (Variable): The instance loss Variable of SOLOv2 network.
+ loss_cate (Variable): The category loss Variable of SOLOv2 network.
+ """
+
+        # 1. Use dice_loss to calculate instance loss
+ loss_ins = []
+ total_weights = paddle.zeros(shape=[1], dtype='float32')
+ for input, target in zip(ins_pred_list, ins_label_list):
+ if input is None:
+ continue
+ target = paddle.cast(target, 'float32')
+ target = paddle.reshape(
+ target,
+ shape=[-1, paddle.shape(input)[-2], paddle.shape(input)[-1]])
+ weights = paddle.cast(
+ paddle.sum(target, axis=[1, 2]) > 0, 'float32')
+ input = F.sigmoid(input)
+ dice_out = paddle.multiply(self._dice_loss(input, target), weights)
+ total_weights += paddle.sum(weights)
+ loss_ins.append(dice_out)
+ loss_ins = paddle.sum(paddle.concat(loss_ins)) / total_weights
+ loss_ins = loss_ins * self.ins_loss_weight
+
+        # 2. Use sigmoid_focal_loss to calculate category loss
+ # expand onehot labels
+ num_classes = cate_preds.shape[-1]
+ cate_labels_bin = F.one_hot(cate_labels, num_classes=num_classes + 1)
+ cate_labels_bin = cate_labels_bin[:, 1:]
+
+ loss_cate = F.sigmoid_focal_loss(
+ cate_preds,
+ label=cate_labels_bin,
+ normalizer=num_ins + 1.,
+ gamma=self.focal_loss_gamma,
+ alpha=self.focal_loss_alpha)
+
+ return loss_ins, loss_cate
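+
+
+if __name__ == '__main__':
+    # Editorial sanity check, not part of upstream ppdet: the soft dice
+    # coefficient 2*sum(p*t) / (sum(p^2) + sum(t^2)) approaches 1 for a
+    # perfect mask, so the dice loss 1 - d approaches 0.
+    pred = paddle.to_tensor([[1., 1., 0., 0.]])
+    gt = paddle.to_tensor([[1., 1., 0., 0.]])
+    print(SOLOv2Loss()._dice_loss(pred, gt))  # ~[0.0005]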
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/sparsercnn_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/sparsercnn_loss.py
new file mode 100644
index 000000000..2d36b21a2
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/sparsercnn_loss.py
@@ -0,0 +1,425 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/PeizeSun/SparseR-CNN/blob/main/projects/SparseRCNN/sparsercnn/loss.py
+The copyright of PeizeSun/SparseR-CNN is as follows:
+MIT License [see LICENSE for details]
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from scipy.optimize import linear_sum_assignment
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.metric import accuracy
+from ppdet.core.workspace import register
+from ppdet.modeling.losses.iou_loss import GIoULoss
+
+__all__ = ["SparseRCNNLoss"]
+
+
+@register
+class SparseRCNNLoss(nn.Layer):
+ """ This class computes the loss for SparseRCNN.
+ The process happens in two steps:
+ 1) we compute hungarian assignment between ground truth boxes and the outputs of the model
+ 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
+ """
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ losses,
+ focal_loss_alpha,
+ focal_loss_gamma,
+ num_classes=80,
+ class_weight=2.,
+ l1_weight=5.,
+ giou_weight=2.):
+ """ Create the criterion.
+ Parameters:
+ num_classes: number of object categories, omitting the special no-object category
+ weight_dict: dict containing as key the names of the losses and as values their relative weight.
+ losses: list of all the losses to be applied. See get_loss for list of available losses.
+ matcher: module able to compute a matching between targets and proposals
+ """
+ super().__init__()
+ self.num_classes = num_classes
+ weight_dict = {
+ "loss_ce": class_weight,
+ "loss_bbox": l1_weight,
+ "loss_giou": giou_weight
+ }
+ self.weight_dict = weight_dict
+ self.losses = losses
+ self.giou_loss = GIoULoss(reduction="sum")
+
+ self.focal_loss_alpha = focal_loss_alpha
+ self.focal_loss_gamma = focal_loss_gamma
+
+ self.matcher = HungarianMatcher(focal_loss_alpha, focal_loss_gamma,
+ class_weight, l1_weight, giou_weight)
+
+ def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
+ """Classification loss (NLL)
+ targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
+ """
+ assert 'pred_logits' in outputs
+ src_logits = outputs['pred_logits']
+
+ idx = self._get_src_permutation_idx(indices)
+ target_classes_o = paddle.concat([
+ paddle.gather(
+ t["labels"], J, axis=0) for t, (_, J) in zip(targets, indices)
+ ])
+ target_classes = paddle.full(
+ src_logits.shape[:2], self.num_classes, dtype="int32")
+ for i, ind in enumerate(zip(idx[0], idx[1])):
+ target_classes[int(ind[0]), int(ind[1])] = target_classes_o[i]
+ target_classes.stop_gradient = True
+
+ src_logits = src_logits.flatten(start_axis=0, stop_axis=1)
+
+ # prepare one_hot target.
+ target_classes = target_classes.flatten(start_axis=0, stop_axis=1)
+ class_ids = paddle.arange(0, self.num_classes)
+ labels = (target_classes.unsqueeze(-1) == class_ids).astype("float32")
+ labels.stop_gradient = True
+
+ # comp focal loss.
+ class_loss = sigmoid_focal_loss(
+ src_logits,
+ labels,
+ alpha=self.focal_loss_alpha,
+ gamma=self.focal_loss_gamma,
+ reduction="sum", ) / num_boxes
+ losses = {'loss_ce': class_loss}
+
+ if log:
+ label_acc = target_classes_o.unsqueeze(-1)
+ src_idx = [src for (src, _) in indices]
+
+ pred_list = []
+ for i in range(outputs["pred_logits"].shape[0]):
+ pred_list.append(
+ paddle.gather(
+ outputs["pred_logits"][i], src_idx[i], axis=0))
+
+ pred = F.sigmoid(paddle.concat(pred_list, axis=0))
+ acc = accuracy(pred, label_acc.astype("int64"))
+ losses["acc"] = acc
+
+ return losses
+
+ def loss_boxes(self, outputs, targets, indices, num_boxes):
+ """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
+ targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
+ The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
+ """
+ assert 'pred_boxes' in outputs # [batch_size, num_proposals, 4]
+ src_idx = [src for (src, _) in indices]
+ src_boxes_list = []
+
+ for i in range(outputs["pred_boxes"].shape[0]):
+ src_boxes_list.append(
+ paddle.gather(
+ outputs["pred_boxes"][i], src_idx[i], axis=0))
+
+ src_boxes = paddle.concat(src_boxes_list, axis=0)
+
+ target_boxes = paddle.concat(
+ [
+ paddle.gather(
+ t['boxes'], I, axis=0)
+ for t, (_, I) in zip(targets, indices)
+ ],
+ axis=0)
+ target_boxes.stop_gradient = True
+ losses = {}
+
+ losses['loss_giou'] = self.giou_loss(src_boxes,
+ target_boxes) / num_boxes
+
+ image_size = paddle.concat([v["img_whwh_tgt"] for v in targets])
+ src_boxes_ = src_boxes / image_size
+ target_boxes_ = target_boxes / image_size
+
+ loss_bbox = F.l1_loss(src_boxes_, target_boxes_, reduction='sum')
+ losses['loss_bbox'] = loss_bbox / num_boxes
+
+ return losses
+
+ def _get_src_permutation_idx(self, indices):
+ # permute predictions following indices
+ batch_idx = paddle.concat(
+ [paddle.full_like(src, i) for i, (src, _) in enumerate(indices)])
+ src_idx = paddle.concat([src for (src, _) in indices])
+ return batch_idx, src_idx
+
+ def _get_tgt_permutation_idx(self, indices):
+ # permute targets following indices
+ batch_idx = paddle.concat(
+ [paddle.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
+ tgt_idx = paddle.concat([tgt for (_, tgt) in indices])
+ return batch_idx, tgt_idx
+
+ def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
+ loss_map = {
+ 'labels': self.loss_labels,
+ 'boxes': self.loss_boxes,
+ }
+        assert loss in loss_map, f'unsupported loss: {loss}'
+ return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
+
+ def forward(self, outputs, targets):
+ """ This performs the loss computation.
+ Parameters:
+ outputs: dict of tensors, see the output specification of the model for the format
+ targets: list of dicts, such that len(targets) == batch_size.
+                     The expected keys in each dict depend on the losses applied; see each loss's doc
+ """
+ outputs_without_aux = {
+ k: v
+ for k, v in outputs.items() if k != 'aux_outputs'
+ }
+
+ # Retrieve the matching between the outputs of the last layer and the targets
+ indices = self.matcher(outputs_without_aux, targets)
+
+        # Compute the average number of target boxes across all nodes, for normalization purposes
+ num_boxes = sum(len(t["labels"]) for t in targets)
+ num_boxes = paddle.to_tensor(
+ [num_boxes],
+ dtype="float32",
+ place=next(iter(outputs.values())).place)
+
+ # Compute all the requested losses
+ losses = {}
+ for loss in self.losses:
+ losses.update(
+ self.get_loss(loss, outputs, targets, indices, num_boxes))
+
+ # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
+ if 'aux_outputs' in outputs:
+ for i, aux_outputs in enumerate(outputs['aux_outputs']):
+ indices = self.matcher(aux_outputs, targets)
+ for loss in self.losses:
+ kwargs = {}
+ if loss == 'labels':
+ # Logging is enabled only for the last layer
+ kwargs = {'log': False}
+ l_dict = self.get_loss(loss, aux_outputs, targets, indices,
+ num_boxes, **kwargs)
+
+ w_dict = {}
+ for k in l_dict.keys():
+ if k in self.weight_dict:
+ w_dict[k + f'_{i}'] = l_dict[k] * self.weight_dict[
+ k]
+ else:
+ w_dict[k + f'_{i}'] = l_dict[k]
+ losses.update(w_dict)
+
+ return losses
+
+
+class HungarianMatcher(nn.Layer):
+ """This class computes an assignment between the targets and the predictions of the network
+ For efficiency reasons, the targets don't include the no_object. Because of this, in general,
+ there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
+ while the others are un-matched (and thus treated as non-objects).
+ """
+
+ def __init__(self,
+ focal_loss_alpha,
+ focal_loss_gamma,
+ cost_class: float=1,
+ cost_bbox: float=1,
+ cost_giou: float=1):
+ """Creates the matcher
+ Params:
+ cost_class: This is the relative weight of the classification error in the matching cost
+ cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
+ cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
+ """
+ super().__init__()
+ self.cost_class = cost_class
+ self.cost_bbox = cost_bbox
+ self.cost_giou = cost_giou
+ self.focal_loss_alpha = focal_loss_alpha
+ self.focal_loss_gamma = focal_loss_gamma
+ assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
+
+ @paddle.no_grad()
+ def forward(self, outputs, targets):
+ """ Performs the matching
+ Args:
+ outputs: This is a dict that contains at least these entries:
+ "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
+ "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
+ eg. outputs = {"pred_logits": pred_logits, "pred_boxes": pred_boxes}
+ targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
+ "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
+ objects in the target) containing the class labels
+ "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
+ eg. targets = [{"labels":labels, "boxes": boxes}, ...,{"labels":labels, "boxes": boxes}]
+ Returns:
+ A list of size batch_size, containing tuples of (index_i, index_j) where:
+ - index_i is the indices of the selected predictions (in order)
+ - index_j is the indices of the corresponding selected targets (in order)
+ For each batch element, it holds:
+ len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
+ """
+ bs, num_queries = outputs["pred_logits"].shape[:2]
+
+ # We flatten to compute the cost matrices in a batch
+ out_prob = F.sigmoid(outputs["pred_logits"].flatten(
+ start_axis=0, stop_axis=1))
+ out_bbox = outputs["pred_boxes"].flatten(start_axis=0, stop_axis=1)
+
+ # Also concat the target labels and boxes
+ tgt_ids = paddle.concat([v["labels"] for v in targets])
+ assert (tgt_ids > -1).all()
+ tgt_bbox = paddle.concat([v["boxes"] for v in targets])
+
+        # Compute the classification cost. Contrary to the loss, we don't use
+        # the NLL, but approximate it by 1 - proba[target class]; the 1 is a
+        # constant that doesn't change the matching, so it can be omitted.
+ alpha = self.focal_loss_alpha
+ gamma = self.focal_loss_gamma
+
+ neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(
+ 1 - out_prob + 1e-8).log())
+ pos_cost_class = alpha * ((1 - out_prob)
+ **gamma) * (-(out_prob + 1e-8).log())
+
+ cost_class = paddle.gather(
+ pos_cost_class, tgt_ids, axis=1) - paddle.gather(
+ neg_cost_class, tgt_ids, axis=1)
+
+ # Compute the L1 cost between boxes
+ image_size_out = paddle.concat(
+ [v["img_whwh"].unsqueeze(0) for v in targets])
+ image_size_out = image_size_out.unsqueeze(1).tile(
+ [1, num_queries, 1]).flatten(
+ start_axis=0, stop_axis=1)
+ image_size_tgt = paddle.concat([v["img_whwh_tgt"] for v in targets])
+
+ out_bbox_ = out_bbox / image_size_out
+ tgt_bbox_ = tgt_bbox / image_size_tgt
+ cost_bbox = F.l1_loss(
+ out_bbox_.unsqueeze(-2), tgt_bbox_,
+ reduction='none').sum(-1) # [batch_size * num_queries, num_tgts]
+
+        # Compute the giou cost between boxes
+ cost_giou = -get_bboxes_giou(out_bbox, tgt_bbox)
+
+ # Final cost matrix
+ C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
+ C = C.reshape([bs, num_queries, -1])
+
+ sizes = [len(v["boxes"]) for v in targets]
+
+ indices = [
+ linear_sum_assignment(c[i].numpy())
+ for i, c in enumerate(C.split(sizes, -1))
+ ]
+ return [(paddle.to_tensor(
+ i, dtype="int32"), paddle.to_tensor(
+ j, dtype="int32")) for i, j in indices]
+
+
+def box_area(boxes):
+ assert (boxes[:, 2:] >= boxes[:, :2]).all()
+ wh = boxes[:, 2:] - boxes[:, :2]
+ return wh[:, 0] * wh[:, 1]
+
+
+def boxes_iou(boxes1, boxes2):
+ '''
+ Compute iou
+
+ Args:
+ boxes1 (paddle.tensor) shape (N, 4)
+ boxes2 (paddle.tensor) shape (M, 4)
+
+ Return:
+ (paddle.tensor) shape (N, M)
+ '''
+ area1 = box_area(boxes1)
+ area2 = box_area(boxes2)
+
+ lt = paddle.maximum(boxes1.unsqueeze(-2)[:, :, :2], boxes2[:, :2])
+ rb = paddle.minimum(boxes1.unsqueeze(-2)[:, :, 2:], boxes2[:, 2:])
+
+ wh = (rb - lt).astype("float32").clip(min=1e-9)
+ inter = wh[:, :, 0] * wh[:, :, 1]
+
+ union = area1.unsqueeze(-1) + area2 - inter + 1e-9
+
+ iou = inter / union
+ return iou, union
+
+
+def get_bboxes_giou(boxes1, boxes2, eps=1e-9):
+ """calculate the ious of boxes1 and boxes2
+
+ Args:
+ boxes1 (Tensor): shape [N, 4]
+ boxes2 (Tensor): shape [M, 4]
+ eps (float): epsilon to avoid divide by zero
+
+ Return:
+        giou (Tensor): giou of boxes1 and boxes2, with the shape [N, M]
+ """
+ assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
+ assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
+
+ iou, union = boxes_iou(boxes1, boxes2)
+
+ lt = paddle.minimum(boxes1.unsqueeze(-2)[:, :, :2], boxes2[:, :2])
+ rb = paddle.maximum(boxes1.unsqueeze(-2)[:, :, 2:], boxes2[:, 2:])
+
+ wh = (rb - lt).astype("float32").clip(min=eps)
+ enclose_area = wh[:, :, 0] * wh[:, :, 1]
+
+ giou = iou - (enclose_area - union) / enclose_area
+
+ return giou
+
+
+def sigmoid_focal_loss(inputs, targets, alpha, gamma, reduction="sum"):
+
+    assert reduction in ["sum", "mean"], f'unsupported reduction: {reduction}'
+
+ p = F.sigmoid(inputs)
+ ce_loss = F.binary_cross_entropy_with_logits(
+ inputs, targets, reduction="none")
+ p_t = p * targets + (1 - p) * (1 - targets)
+ loss = ce_loss * ((1 - p_t)**gamma)
+
+ if alpha >= 0:
+ alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
+ loss = alpha_t * loss
+
+ if reduction == "mean":
+ loss = loss.mean()
+ elif reduction == "sum":
+ loss = loss.sum()
+
+ return loss
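+
+
+if __name__ == '__main__':
+    # Editorial sanity check, not part of upstream ppdet: with gamma=2 a
+    # confident-correct logit is down-weighted by (1 - p_t)^2, so it
+    # contributes far less than a confident-wrong one under the same alpha.
+    logits = paddle.to_tensor([[4.0], [-4.0]])
+    labels = paddle.to_tensor([[1.0], [1.0]])
+    print(sigmoid_focal_loss(logits, labels, alpha=0.25, gamma=2.0))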
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/ssd_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/ssd_loss.py
new file mode 100644
index 000000000..62aecc1f3
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/ssd_loss.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register
+from ..ops import iou_similarity
+from ..bbox_utils import bbox2delta
+
+__all__ = ['SSDLoss']
+
+
+@register
+class SSDLoss(nn.Layer):
+ """
+ SSDLoss
+
+ Args:
+        overlap_threshold (float32, optional): IoU threshold used to split
+            priors into positives and background negatives, 0.5 by default.
+ neg_pos_ratio (float): The ratio of negative samples / positive samples.
+ loc_loss_weight (float): The weight of loc_loss.
+ conf_loss_weight (float): The weight of conf_loss.
+ prior_box_var (list): Variances corresponding to prior box coord, [0.1,
+ 0.1, 0.2, 0.2] by default.
+ """
+
+ def __init__(self,
+ overlap_threshold=0.5,
+ neg_pos_ratio=3.0,
+ loc_loss_weight=1.0,
+ conf_loss_weight=1.0,
+ prior_box_var=[0.1, 0.1, 0.2, 0.2]):
+ super(SSDLoss, self).__init__()
+ self.overlap_threshold = overlap_threshold
+ self.neg_pos_ratio = neg_pos_ratio
+ self.loc_loss_weight = loc_loss_weight
+ self.conf_loss_weight = conf_loss_weight
+ self.prior_box_var = [1. / a for a in prior_box_var]
+
+ def _bipartite_match_for_batch(self, gt_bbox, gt_label, prior_boxes,
+ bg_index):
+ """
+ Args:
+ gt_bbox (Tensor): [B, N, 4]
+ gt_label (Tensor): [B, N, 1]
+ prior_boxes (Tensor): [A, 4]
+ bg_index (int): Background class index
+ """
+ batch_size, num_priors = gt_bbox.shape[0], prior_boxes.shape[0]
+ ious = iou_similarity(gt_bbox.reshape((-1, 4)), prior_boxes).reshape(
+ (batch_size, -1, num_priors))
+
+ # For each prior box, get the max IoU of all GTs.
+ prior_max_iou, prior_argmax_iou = ious.max(axis=1), ious.argmax(axis=1)
+ # For each GT, get the max IoU of all prior boxes.
+ gt_max_iou, gt_argmax_iou = ious.max(axis=2), ious.argmax(axis=2)
+
+ # Gather target bbox and label according to 'prior_argmax_iou' index.
+ batch_ind = paddle.arange(end=batch_size, dtype='int64').unsqueeze(-1)
+ prior_argmax_iou = paddle.stack(
+ [batch_ind.tile([1, num_priors]), prior_argmax_iou], axis=-1)
+ targets_bbox = paddle.gather_nd(gt_bbox, prior_argmax_iou)
+ targets_label = paddle.gather_nd(gt_label, prior_argmax_iou)
+ # Assign negative
+ bg_index_tensor = paddle.full([batch_size, num_priors, 1], bg_index,
+ 'int64')
+ targets_label = paddle.where(
+ prior_max_iou.unsqueeze(-1) < self.overlap_threshold,
+ bg_index_tensor, targets_label)
+
+ # Ensure each GT can match the max IoU prior box.
+ batch_ind = (batch_ind * num_priors + gt_argmax_iou).flatten()
+ targets_bbox = paddle.scatter(
+ targets_bbox.reshape([-1, 4]), batch_ind,
+ gt_bbox.reshape([-1, 4])).reshape([batch_size, -1, 4])
+ targets_label = paddle.scatter(
+ targets_label.reshape([-1, 1]), batch_ind,
+ gt_label.reshape([-1, 1])).reshape([batch_size, -1, 1])
+ targets_label[:, :1] = bg_index
+
+ # Encode box
+ prior_boxes = prior_boxes.unsqueeze(0).tile([batch_size, 1, 1])
+ targets_bbox = bbox2delta(
+ prior_boxes.reshape([-1, 4]),
+ targets_bbox.reshape([-1, 4]), self.prior_box_var)
+ targets_bbox = targets_bbox.reshape([batch_size, -1, 4])
+
+ return targets_bbox, targets_label
+
+ def _mine_hard_example(self,
+ conf_loss,
+ targets_label,
+ bg_index,
+ mine_neg_ratio=0.01):
+ pos = (targets_label != bg_index).astype(conf_loss.dtype)
+ num_pos = pos.sum(axis=1, keepdim=True)
+ neg = (targets_label == bg_index).astype(conf_loss.dtype)
+
+ conf_loss = conf_loss.detach() * neg
+ loss_idx = conf_loss.argsort(axis=1, descending=True)
+ idx_rank = loss_idx.argsort(axis=1)
+ num_negs = []
+ for i in range(conf_loss.shape[0]):
+ cur_num_pos = num_pos[i]
+ num_neg = paddle.clip(
+ cur_num_pos * self.neg_pos_ratio, max=pos.shape[1])
+ num_neg = num_neg if num_neg > 0 else paddle.to_tensor(
+ [pos.shape[1] * mine_neg_ratio])
+ num_negs.append(num_neg)
+ num_negs = paddle.stack(num_negs).expand_as(idx_rank)
+ neg_mask = (idx_rank < num_negs).astype(conf_loss.dtype)
+
+ return (neg_mask + pos).astype('bool')
+
+ def forward(self, boxes, scores, gt_bbox, gt_label, prior_boxes):
+ boxes = paddle.concat(boxes, axis=1)
+ scores = paddle.concat(scores, axis=1)
+ gt_label = gt_label.unsqueeze(-1).astype('int64')
+ prior_boxes = paddle.concat(prior_boxes, axis=0)
+ bg_index = scores.shape[-1] - 1
+
+ # Match bbox and get targets.
+ targets_bbox, targets_label = \
+ self._bipartite_match_for_batch(gt_bbox, gt_label, prior_boxes, bg_index)
+ targets_bbox.stop_gradient = True
+ targets_label.stop_gradient = True
+
+ # Compute regression loss.
+ # Select positive samples.
+ bbox_mask = paddle.tile(targets_label != bg_index, [1, 1, 4])
+ if bbox_mask.astype(boxes.dtype).sum() > 0:
+ location = paddle.masked_select(boxes, bbox_mask)
+ targets_bbox = paddle.masked_select(targets_bbox, bbox_mask)
+ loc_loss = F.smooth_l1_loss(location, targets_bbox, reduction='sum')
+ loc_loss = loc_loss * self.loc_loss_weight
+ else:
+ loc_loss = paddle.zeros([1])
+
+ # Compute confidence loss.
+ conf_loss = F.cross_entropy(scores, targets_label, reduction="none")
+ # Mining hard examples.
+ label_mask = self._mine_hard_example(
+ conf_loss.squeeze(-1), targets_label.squeeze(-1), bg_index)
+ conf_loss = paddle.masked_select(conf_loss, label_mask.unsqueeze(-1))
+ conf_loss = conf_loss.sum() * self.conf_loss_weight
+
+ # Compute overall weighted loss.
+ normalizer = (targets_label != bg_index).astype('float32').sum().clip(
+ min=1)
+ loss = (conf_loss + loc_loss) / normalizer
+
+ return loss
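+
+
+# Editorial note, not part of upstream ppdet: the double argsort in
+# _mine_hard_example is the classic rank trick. `conf_loss.argsort(
+# descending=True)` orders priors by loss; a second `argsort` turns that
+# ordering into each prior's rank among the negatives, so `idx_rank < num_negs`
+# keeps exactly the `neg_pos_ratio * num_pos` hardest negatives per image.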
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/varifocal_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/varifocal_loss.py
new file mode 100644
index 000000000..42d18a659
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/varifocal_loss.py
@@ -0,0 +1,152 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The code is based on:
+# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/varifocal_loss.py
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import numpy as np
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register, serializable
+from ppdet.modeling import ops
+
+__all__ = ['VarifocalLoss']
+
+
+def varifocal_loss(pred,
+ target,
+ alpha=0.75,
+ gamma=2.0,
+ iou_weighted=True,
+ use_sigmoid=True):
+ """`Varifocal Loss `_
+
+ Args:
+ pred (Tensor): The prediction with shape (N, C), C is the
+ number of classes
+ target (Tensor): The learning target of the iou-aware
+ classification score with shape (N, C), C is the number of classes.
+ alpha (float, optional): A balance factor for the negative part of
+ Varifocal Loss, which is different from the alpha of Focal Loss.
+ Defaults to 0.75.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ iou_weighted (bool, optional): Whether to weight the loss of the
+ positive example with the iou target. Defaults to True.
+ """
+ # pred and target should be of the same size
+ assert pred.shape == target.shape
+ if use_sigmoid:
+ pred_new = F.sigmoid(pred)
+ else:
+ pred_new = pred
+ target = target.cast(pred.dtype)
+ if iou_weighted:
+ focal_weight = target * (target > 0.0).cast('float32') + \
+ alpha * (pred_new - target).abs().pow(gamma) * \
+ (target <= 0.0).cast('float32')
+ else:
+ focal_weight = (target > 0.0).cast('float32') + \
+ alpha * (pred_new - target).abs().pow(gamma) * \
+ (target <= 0.0).cast('float32')
+
+ if use_sigmoid:
+ loss = F.binary_cross_entropy_with_logits(
+ pred, target, reduction='none') * focal_weight
+ else:
+ loss = F.binary_cross_entropy(
+ pred, target, reduction='none') * focal_weight
+ loss = loss.sum(axis=1)
+ return loss
+
+
+@register
+@serializable
+class VarifocalLoss(nn.Layer):
+ def __init__(self,
+ use_sigmoid=True,
+ alpha=0.75,
+ gamma=2.0,
+ iou_weighted=True,
+ reduction='mean',
+ loss_weight=1.0):
+ """`Varifocal Loss `_
+
+ Args:
+ use_sigmoid (bool, optional): Whether the prediction is
+ used for sigmoid or softmax. Defaults to True.
+ alpha (float, optional): A balance factor for the negative part of
+ Varifocal Loss, which is different from the alpha of Focal
+ Loss. Defaults to 0.75.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ iou_weighted (bool, optional): Whether to weight the loss of the
+ positive examples with the iou target. Defaults to True.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'. Options are "none", "mean" and
+ "sum".
+ loss_weight (float, optional): Weight of loss. Defaults to 1.0.
+ """
+ super(VarifocalLoss, self).__init__()
+ assert alpha >= 0.0
+ self.use_sigmoid = use_sigmoid
+ self.alpha = alpha
+ self.gamma = gamma
+ self.iou_weighted = iou_weighted
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self, pred, target, weight=None, avg_factor=None):
+ """Forward function.
+
+ Args:
+ pred (Tensor): The prediction.
+ target (Tensor): The learning target of the prediction.
+ weight (Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ Returns:
+ Tensor: The calculated loss
+ """
+ loss = self.loss_weight * varifocal_loss(
+ pred,
+ target,
+ alpha=self.alpha,
+ gamma=self.gamma,
+ iou_weighted=self.iou_weighted,
+ use_sigmoid=self.use_sigmoid)
+
+ if weight is not None:
+ loss = loss * weight
+ if avg_factor is None:
+ if self.reduction == 'none':
+ return loss
+ elif self.reduction == 'mean':
+ return loss.mean()
+ elif self.reduction == 'sum':
+ return loss.sum()
+ else:
+ # if reduction is mean, then average the loss by avg_factor
+ if self.reduction == 'mean':
+ loss = loss.sum() / avg_factor
+ # if reduction is 'none', then do nothing, otherwise raise an error
+ elif self.reduction != 'none':
+ raise ValueError(
+ 'avg_factor can not be used with reduction="sum"')
+ return loss
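+
+
+if __name__ == '__main__':
+    # Editorial sketch, not part of upstream ppdet: positives (target > 0) are
+    # weighted by their IoU target, negatives by alpha * |p - t|^gamma, which
+    # is the asymmetric treatment that distinguishes VFL from plain focal loss.
+    logits = paddle.randn([3, 4])
+    target = paddle.to_tensor([[0., 0.8, 0., 0.],
+                               [0., 0., 0., 0.],
+                               [0., 0., 0., 0.]])
+    print(varifocal_loss(logits, target).shape)  # [3]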
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/yolo_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/yolo_loss.py
new file mode 100644
index 000000000..657959cd7
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/losses/yolo_loss.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register
+
+from ..bbox_utils import decode_yolo, xywh2xyxy, iou_similarity
+
+__all__ = ['YOLOv3Loss']
+
+
+def bbox_transform(pbox, anchor, downsample):
+ pbox = decode_yolo(pbox, anchor, downsample)
+ pbox = xywh2xyxy(pbox)
+ return pbox
+
+
+@register
+class YOLOv3Loss(nn.Layer):
+
+ __inject__ = ['iou_loss', 'iou_aware_loss']
+ __shared__ = ['num_classes']
+
+ def __init__(self,
+ num_classes=80,
+ ignore_thresh=0.7,
+ label_smooth=False,
+ downsample=[32, 16, 8],
+ scale_x_y=1.,
+ iou_loss=None,
+ iou_aware_loss=None):
+ """
+ YOLOv3Loss layer
+
+ Args:
+            num_classes (int): number of foreground classes
+ ignore_thresh (float): threshold to ignore confidence loss
+ label_smooth (bool): whether to use label smoothing
+ downsample (list): downsample ratio for each detection block
+ scale_x_y (float): scale_x_y factor
+ iou_loss (object): IoULoss instance
+ iou_aware_loss (object): IouAwareLoss instance
+ """
+ super(YOLOv3Loss, self).__init__()
+ self.num_classes = num_classes
+ self.ignore_thresh = ignore_thresh
+ self.label_smooth = label_smooth
+ self.downsample = downsample
+ self.scale_x_y = scale_x_y
+ self.iou_loss = iou_loss
+ self.iou_aware_loss = iou_aware_loss
+ self.distill_pairs = []
+
+ def obj_loss(self, pbox, gbox, pobj, tobj, anchor, downsample):
+ # pbox
+ pbox = decode_yolo(pbox, anchor, downsample)
+ pbox = xywh2xyxy(pbox)
+ pbox = paddle.concat(pbox, axis=-1)
+ b = pbox.shape[0]
+ pbox = pbox.reshape((b, -1, 4))
+        # gbox: convert (cx, cy, w, h) to corner form
+        gxy = gbox[:, :, 0:2] - gbox[:, :, 2:4] * 0.5  # top-left corner
+        gwh = gbox[:, :, 0:2] + gbox[:, :, 2:4] * 0.5  # bottom-right corner
+ gbox = paddle.concat([gxy, gwh], axis=-1)
+
+ iou = iou_similarity(pbox, gbox)
+ iou.stop_gradient = True
+ iou_max = iou.max(2) # [N, M1]
+ iou_mask = paddle.cast(iou_max <= self.ignore_thresh, dtype=pbox.dtype)
+ iou_mask.stop_gradient = True
+
+ pobj = pobj.reshape((b, -1))
+ tobj = tobj.reshape((b, -1))
+ obj_mask = paddle.cast(tobj > 0, dtype=pbox.dtype)
+ obj_mask.stop_gradient = True
+
+ loss_obj = F.binary_cross_entropy_with_logits(
+ pobj, obj_mask, reduction='none')
+ loss_obj_pos = (loss_obj * tobj)
+ loss_obj_neg = (loss_obj * (1 - obj_mask) * iou_mask)
+ return loss_obj_pos + loss_obj_neg
+
+ def cls_loss(self, pcls, tcls):
+ if self.label_smooth:
+ delta = min(1. / self.num_classes, 1. / 40)
+ pos, neg = 1 - delta, delta
+ # 1 for positive, 0 for negative
+ tcls = pos * paddle.cast(
+ tcls > 0., dtype=tcls.dtype) + neg * paddle.cast(
+ tcls <= 0., dtype=tcls.dtype)
+
+ loss_cls = F.binary_cross_entropy_with_logits(
+ pcls, tcls, reduction='none')
+ return loss_cls
+
+ def yolov3_loss(self, p, t, gt_box, anchor, downsample, scale=1.,
+ eps=1e-10):
+ na = len(anchor)
+ b, c, h, w = p.shape
+ if self.iou_aware_loss:
+ ioup, p = p[:, 0:na, :, :], p[:, na:, :, :]
+ ioup = ioup.unsqueeze(-1)
+ p = p.reshape((b, na, -1, h, w)).transpose((0, 1, 3, 4, 2))
+ x, y = p[:, :, :, :, 0:1], p[:, :, :, :, 1:2]
+ w, h = p[:, :, :, :, 2:3], p[:, :, :, :, 3:4]
+ obj, pcls = p[:, :, :, :, 4:5], p[:, :, :, :, 5:]
+ self.distill_pairs.append([x, y, w, h, obj, pcls])
+
+ t = t.transpose((0, 1, 3, 4, 2))
+ tx, ty = t[:, :, :, :, 0:1], t[:, :, :, :, 1:2]
+ tw, th = t[:, :, :, :, 2:3], t[:, :, :, :, 3:4]
+ tscale = t[:, :, :, :, 4:5]
+ tobj, tcls = t[:, :, :, :, 5:6], t[:, :, :, :, 6:]
+
+ tscale_obj = tscale * tobj
+ loss = dict()
+
+ x = scale * F.sigmoid(x) - 0.5 * (scale - 1.)
+ y = scale * F.sigmoid(y) - 0.5 * (scale - 1.)
+
+ if abs(scale - 1.) < eps:
+ loss_x = F.binary_cross_entropy(x, tx, reduction='none')
+ loss_y = F.binary_cross_entropy(y, ty, reduction='none')
+ loss_xy = tscale_obj * (loss_x + loss_y)
+ else:
+ loss_x = paddle.abs(x - tx)
+ loss_y = paddle.abs(y - ty)
+ loss_xy = tscale_obj * (loss_x + loss_y)
+
+ loss_xy = loss_xy.sum([1, 2, 3, 4]).mean()
+
+ loss_w = paddle.abs(w - tw)
+ loss_h = paddle.abs(h - th)
+ loss_wh = tscale_obj * (loss_w + loss_h)
+ loss_wh = loss_wh.sum([1, 2, 3, 4]).mean()
+
+ loss['loss_xy'] = loss_xy
+ loss['loss_wh'] = loss_wh
+
+ if self.iou_loss is not None:
+ # warn: do not modify x, y, w, h in place
+ box, tbox = [x, y, w, h], [tx, ty, tw, th]
+ pbox = bbox_transform(box, anchor, downsample)
+ gbox = bbox_transform(tbox, anchor, downsample)
+ loss_iou = self.iou_loss(pbox, gbox)
+ loss_iou = loss_iou * tscale_obj
+ loss_iou = loss_iou.sum([1, 2, 3, 4]).mean()
+ loss['loss_iou'] = loss_iou
+
+ if self.iou_aware_loss is not None:
+ box, tbox = [x, y, w, h], [tx, ty, tw, th]
+ pbox = bbox_transform(box, anchor, downsample)
+ gbox = bbox_transform(tbox, anchor, downsample)
+ loss_iou_aware = self.iou_aware_loss(ioup, pbox, gbox)
+ loss_iou_aware = loss_iou_aware * tobj
+ loss_iou_aware = loss_iou_aware.sum([1, 2, 3, 4]).mean()
+ loss['loss_iou_aware'] = loss_iou_aware
+
+ box = [x, y, w, h]
+ loss_obj = self.obj_loss(box, gt_box, obj, tobj, anchor, downsample)
+ loss_obj = loss_obj.sum(-1).mean()
+ loss['loss_obj'] = loss_obj
+ loss_cls = self.cls_loss(pcls, tcls) * tobj
+ loss_cls = loss_cls.sum([1, 2, 3, 4]).mean()
+ loss['loss_cls'] = loss_cls
+ return loss
+
+ def forward(self, inputs, targets, anchors):
+        num_levels = len(inputs)
+        gt_targets = [targets['target{}'.format(i)] for i in range(num_levels)]
+ gt_box = targets['gt_bbox']
+ yolo_losses = dict()
+ self.distill_pairs.clear()
+ for x, t, anchor, downsample in zip(inputs, gt_targets, anchors,
+ self.downsample):
+ yolo_loss = self.yolov3_loss(x, t, gt_box, anchor, downsample,
+ self.scale_x_y)
+ for k, v in yolo_loss.items():
+ if k in yolo_losses:
+ yolo_losses[k] += v
+ else:
+ yolo_losses[k] = v
+
+ loss = 0
+ for k, v in yolo_losses.items():
+ loss += v
+
+ yolo_losses['loss'] = loss
+ return yolo_losses
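+
+
+# Illustrative sketch of the grid-sensitive decode used in yolov3_loss():
+# x = scale * sigmoid(x) - 0.5 * (scale - 1) stretches the sigmoid range to
+# [-0.5 * (scale - 1), 1 + 0.5 * (scale - 1)], so predicted centers can reach
+# the borders of a grid cell. The logits below are made up for demonstration.
+if __name__ == '__main__':
+    logits = paddle.to_tensor([-6., 0., 6.])
+    for scale in (1.0, 1.05):
+        x = scale * F.sigmoid(logits) - 0.5 * (scale - 1.)
+        print(scale, float(x.min()), float(x.max()))
+    # scale=1.05 lets x dip slightly below 0 and rise slightly above 1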
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__init__.py
new file mode 100644
index 000000000..258e4c901
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import matching
+from . import tracker
+from . import motion
+from . import visualization
+from . import utils
+
+from .matching import *
+from .tracker import *
+from .motion import *
+from .visualization import *
+from .utils import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..2ee33f2d9
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__pycache__/utils.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 000000000..e00b5655a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__pycache__/utils.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__pycache__/visualization.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__pycache__/visualization.cpython-37.pyc
new file mode 100644
index 000000000..5b2c5807a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/__pycache__/visualization.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__init__.py
new file mode 100644
index 000000000..54c6680f7
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import jde_matching
+from . import deepsort_matching
+
+from .jde_matching import *
+from .deepsort_matching import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..2ea1093b4
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__pycache__/deepsort_matching.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__pycache__/deepsort_matching.cpython-37.pyc
new file mode 100644
index 000000000..c5cb999da
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__pycache__/deepsort_matching.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__pycache__/jde_matching.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__pycache__/jde_matching.cpython-37.pyc
new file mode 100644
index 000000000..37ac0e28c
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/__pycache__/jde_matching.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/deepsort_matching.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/deepsort_matching.py
new file mode 100644
index 000000000..3859ccfbd
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/deepsort_matching.py
@@ -0,0 +1,379 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/nwojke/deep_sort/tree/master/deep_sort
+"""
+
+import numpy as np
+from scipy.optimize import linear_sum_assignment
+from ..motion import kalman_filter
+
+INFTY_COST = 1e+5
+
+__all__ = [
+ 'iou_1toN',
+ 'iou_cost',
+ '_nn_euclidean_distance',
+ '_nn_cosine_distance',
+ 'NearestNeighborDistanceMetric',
+ 'min_cost_matching',
+ 'matching_cascade',
+ 'gate_cost_matrix',
+]
+
+
+def iou_1toN(bbox, candidates):
+ """
+    Compute intersection over union (IoU) between one box and N candidates.
+
+ Args:
+ bbox (ndarray): A bounding box in format `(top left x, top left y, width, height)`.
+ candidates (ndarray): A matrix of candidate bounding boxes (one per row) in the
+ same format as `bbox`.
+
+ Returns:
+ ious (ndarray): The intersection over union in [0, 1] between the `bbox`
+ and each candidate. A higher score means a larger fraction of the
+ `bbox` is occluded by the candidate.
+ """
+ bbox_tl = bbox[:2]
+ bbox_br = bbox[:2] + bbox[2:]
+ candidates_tl = candidates[:, :2]
+ candidates_br = candidates[:, :2] + candidates[:, 2:]
+
+ tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
+ np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
+ br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
+ np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
+ wh = np.maximum(0., br - tl)
+
+ area_intersection = wh.prod(axis=1)
+ area_bbox = bbox[2:].prod()
+ area_candidates = candidates[:, 2:].prod(axis=1)
+ ious = area_intersection / (area_bbox + area_candidates - area_intersection)
+ return ious
+
+
+def iou_cost(tracks, detections, track_indices=None, detection_indices=None):
+ """
+ IoU distance metric.
+
+ Args:
+ tracks (list[Track]): A list of tracks.
+ detections (list[Detection]): A list of detections.
+ track_indices (Optional[list[int]]): A list of indices to tracks that
+ should be matched. Defaults to all `tracks`.
+ detection_indices (Optional[list[int]]): A list of indices to detections
+ that should be matched. Defaults to all `detections`.
+
+ Returns:
+ cost_matrix (ndarray): A cost matrix of shape len(track_indices),
+ len(detection_indices) where entry (i, j) is
+ `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
+ """
+ if track_indices is None:
+ track_indices = np.arange(len(tracks))
+ if detection_indices is None:
+ detection_indices = np.arange(len(detections))
+
+ cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
+ for row, track_idx in enumerate(track_indices):
+ if tracks[track_idx].time_since_update > 1:
+            cost_matrix[row, :] = INFTY_COST
+ continue
+
+ bbox = tracks[track_idx].to_tlwh()
+ candidates = np.asarray([detections[i].tlwh for i in detection_indices])
+ cost_matrix[row, :] = 1. - iou_1toN(bbox, candidates)
+ return cost_matrix
+
+
+def _nn_euclidean_distance(s, q):
+ """
+ Compute pair-wise squared (Euclidean) distance between points in `s` and `q`.
+
+ Args:
+ s (ndarray): Sample points: an NxM matrix of N samples of dimensionality M.
+ q (ndarray): Query points: an LxM matrix of L samples of dimensionality M.
+
+ Returns:
+        distances (ndarray): A vector of length L that contains, for each
+            entry in `q`, the smallest squared Euclidean distance to a sample
+            in `s`.
+ """
+ s, q = np.asarray(s), np.asarray(q)
+ if len(s) == 0 or len(q) == 0:
+ return np.zeros((len(s), len(q)))
+ s2, q2 = np.square(s).sum(axis=1), np.square(q).sum(axis=1)
+ distances = -2. * np.dot(s, q.T) + s2[:, None] + q2[None, :]
+ distances = np.clip(distances, 0., float(np.inf))
+
+ return np.maximum(0.0, distances.min(axis=0))
+
+
+def _nn_cosine_distance(s, q):
+ """
+ Compute pair-wise cosine distance between points in `s` and `q`.
+
+ Args:
+ s (ndarray): Sample points: an NxM matrix of N samples of dimensionality M.
+ q (ndarray): Query points: an LxM matrix of L samples of dimensionality M.
+
+ Returns:
+        distances (ndarray): A vector of length L that contains, for each
+            entry in `q`, the smallest cosine distance to a sample in `s`.
+ """
+ s = np.asarray(s) / np.linalg.norm(s, axis=1, keepdims=True)
+ q = np.asarray(q) / np.linalg.norm(q, axis=1, keepdims=True)
+ distances = 1. - np.dot(s, q.T)
+
+ return distances.min(axis=0)
+
+
+class NearestNeighborDistanceMetric(object):
+ """
+ A nearest neighbor distance metric that, for each target, returns
+ the closest distance to any sample that has been observed so far.
+
+ Args:
+ metric (str): Either "euclidean" or "cosine".
+ matching_threshold (float): The matching threshold. Samples with larger
+ distance are considered an invalid match.
+ budget (Optional[int]): If not None, fix samples per class to at most
+ this number. Removes the oldest samples when the budget is reached.
+
+ Attributes:
+ samples (Dict[int -> List[ndarray]]): A dictionary that maps from target
+ identities to the list of samples that have been observed so far.
+ """
+
+ def __init__(self, metric, matching_threshold, budget=None):
+ if metric == "euclidean":
+ self._metric = _nn_euclidean_distance
+ elif metric == "cosine":
+ self._metric = _nn_cosine_distance
+ else:
+ raise ValueError(
+ "Invalid metric; must be either 'euclidean' or 'cosine'")
+ self.matching_threshold = matching_threshold
+ self.budget = budget
+ self.samples = {}
+
+ def partial_fit(self, features, targets, active_targets):
+ """
+ Update the distance metric with new data.
+
+ Args:
+ features (ndarray): An NxM matrix of N features of dimensionality M.
+ targets (ndarray): An integer array of associated target identities.
+ active_targets (List[int]): A list of targets that are currently
+ present in the scene.
+ """
+ for feature, target in zip(features, targets):
+ self.samples.setdefault(target, []).append(feature)
+ if self.budget is not None:
+ self.samples[target] = self.samples[target][-self.budget:]
+ self.samples = {k: self.samples[k] for k in active_targets}
+
+ def distance(self, features, targets):
+ """
+ Compute distance between features and targets.
+
+ Args:
+ features (ndarray): An NxM matrix of N features of dimensionality M.
+ targets (list[int]): A list of targets to match the given `features` against.
+
+ Returns:
+ cost_matrix (ndarray): a cost matrix of shape len(targets), len(features),
+                where element (i, j) contains the closest distance (squared
+                Euclidean or cosine, depending on the metric) between
+                `targets[i]` and `features[j]`.
+ """
+ cost_matrix = np.zeros((len(targets), len(features)))
+ for i, target in enumerate(targets):
+ cost_matrix[i, :] = self._metric(self.samples[target], features)
+ return cost_matrix
+
+
+def min_cost_matching(distance_metric,
+ max_distance,
+ tracks,
+ detections,
+ track_indices=None,
+ detection_indices=None):
+ """
+ Solve linear assignment problem.
+
+ Args:
+ distance_metric :
+            Callable[[List[Track], List[Detection], List[int], List[int]], ndarray]
+ The distance metric is given a list of tracks and detections as
+ well as a list of N track indices and M detection indices. The
+ metric should return the NxM dimensional cost matrix, where element
+ (i, j) is the association cost between the i-th track in the given
+ track indices and the j-th detection in the given detection_indices.
+ max_distance (float): Gating threshold. Associations with cost larger
+ than this value are disregarded.
+ tracks (list[Track]): A list of predicted tracks at the current time
+ step.
+ detections (list[Detection]): A list of detections at the current time
+ step.
+ track_indices (list[int]): List of track indices that maps rows in
+ `cost_matrix` to tracks in `tracks`.
+ detection_indices (List[int]): List of detection indices that maps
+ columns in `cost_matrix` to detections in `detections`.
+
+ Returns:
+ A tuple (List[(int, int)], List[int], List[int]) with the following
+ three entries:
+ * A list of matched track and detection indices.
+ * A list of unmatched track indices.
+ * A list of unmatched detection indices.
+ """
+ if track_indices is None:
+ track_indices = np.arange(len(tracks))
+ if detection_indices is None:
+ detection_indices = np.arange(len(detections))
+
+ if len(detection_indices) == 0 or len(track_indices) == 0:
+ return [], track_indices, detection_indices # Nothing to match.
+
+ cost_matrix = distance_metric(tracks, detections, track_indices,
+ detection_indices)
+
+ cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
+ indices = linear_sum_assignment(cost_matrix)
+
+ matches, unmatched_tracks, unmatched_detections = [], [], []
+ for col, detection_idx in enumerate(detection_indices):
+ if col not in indices[1]:
+ unmatched_detections.append(detection_idx)
+ for row, track_idx in enumerate(track_indices):
+ if row not in indices[0]:
+ unmatched_tracks.append(track_idx)
+ for row, col in zip(indices[0], indices[1]):
+ track_idx = track_indices[row]
+ detection_idx = detection_indices[col]
+ if cost_matrix[row, col] > max_distance:
+ unmatched_tracks.append(track_idx)
+ unmatched_detections.append(detection_idx)
+ else:
+ matches.append((track_idx, detection_idx))
+ return matches, unmatched_tracks, unmatched_detections
+
+
+def matching_cascade(distance_metric,
+ max_distance,
+ cascade_depth,
+ tracks,
+ detections,
+ track_indices=None,
+ detection_indices=None):
+ """
+ Run matching cascade.
+
+ Args:
+ distance_metric :
+            Callable[[List[Track], List[Detection], List[int], List[int]], ndarray]
+ The distance metric is given a list of tracks and detections as
+ well as a list of N track indices and M detection indices. The
+ metric should return the NxM dimensional cost matrix, where element
+ (i, j) is the association cost between the i-th track in the given
+ track indices and the j-th detection in the given detection_indices.
+ max_distance (float): Gating threshold. Associations with cost larger
+ than this value are disregarded.
+        cascade_depth (int): The cascade depth; should be set to the maximum
+ track age.
+ tracks (list[Track]): A list of predicted tracks at the current time
+ step.
+ detections (list[Detection]): A list of detections at the current time
+ step.
+ track_indices (list[int]): List of track indices that maps rows in
+ `cost_matrix` to tracks in `tracks`.
+ detection_indices (List[int]): List of detection indices that maps
+ columns in `cost_matrix` to detections in `detections`.
+
+ Returns:
+ A tuple (List[(int, int)], List[int], List[int]) with the following
+ three entries:
+ * A list of matched track and detection indices.
+ * A list of unmatched track indices.
+ * A list of unmatched detection indices.
+ """
+ if track_indices is None:
+ track_indices = list(range(len(tracks)))
+ if detection_indices is None:
+ detection_indices = list(range(len(detections)))
+
+ unmatched_detections = detection_indices
+ matches = []
+ for level in range(cascade_depth):
+ if len(unmatched_detections) == 0: # No detections left
+ break
+
+ track_indices_l = [
+ k for k in track_indices if tracks[k].time_since_update == 1 + level
+ ]
+ if len(track_indices_l) == 0: # Nothing to match at this level
+ continue
+
+ matches_l, _, unmatched_detections = \
+ min_cost_matching(
+ distance_metric, max_distance, tracks, detections,
+ track_indices_l, unmatched_detections)
+ matches += matches_l
+ unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
+ return matches, unmatched_tracks, unmatched_detections
+
+
+def gate_cost_matrix(kf,
+ cost_matrix,
+ tracks,
+ detections,
+ track_indices,
+ detection_indices,
+ gated_cost=INFTY_COST,
+ only_position=False):
+ """
+ Invalidate infeasible entries in cost matrix based on the state
+ distributions obtained by Kalman filtering.
+
+ Args:
+ kf (object): The Kalman filter.
+ cost_matrix (ndarray): The NxM dimensional cost matrix, where N is the
+ number of track indices and M is the number of detection indices,
+ such that entry (i, j) is the association cost between
+ `tracks[track_indices[i]]` and `detections[detection_indices[j]]`.
+ tracks (list[Track]): A list of predicted tracks at the current time
+ step.
+ detections (list[Detection]): A list of detections at the current time
+ step.
+ track_indices (List[int]): List of track indices that maps rows in
+ `cost_matrix` to tracks in `tracks`.
+ detection_indices (List[int]): List of detection indices that maps
+ columns in `cost_matrix` to detections in `detections`.
+ gated_cost (Optional[float]): Entries in the cost matrix corresponding
+ to infeasible associations are set this value. Defaults to a very
+ large value.
+ only_position (Optional[bool]): If True, only the x, y position of the
+ state distribution is considered during gating. Default False.
+ """
+ gating_dim = 2 if only_position else 4
+ gating_threshold = kalman_filter.chi2inv95[gating_dim]
+ measurements = np.asarray(
+ [detections[i].to_xyah() for i in detection_indices])
+ for row, track_idx in enumerate(track_indices):
+ track = tracks[track_idx]
+ gating_distance = kf.gating_distance(track.mean, track.covariance,
+ measurements, only_position)
+ cost_matrix[row, gating_distance > gating_threshold] = gated_cost
+ return cost_matrix
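+
+
+# Illustrative sketch with made-up boxes and features: iou_1toN() scores one
+# tlwh box against candidates, and NearestNeighborDistanceMetric keeps a
+# per-target feature gallery and returns the closest distance per target.
+if __name__ == '__main__':
+    bbox = np.array([0., 0., 10., 10.])
+    candidates = np.array([[0., 0., 10., 10.], [5., 5., 10., 10.]])
+    print(iou_1toN(bbox, candidates))  # -> [1.0, 0.1428...]
+
+    metric = NearestNeighborDistanceMetric('cosine', matching_threshold=0.4)
+    metric.partial_fit(np.eye(2), targets=np.array([7, 9]),
+                       active_targets=[7, 9])
+    # query aligned with target 7's gallery: distance ~0 to 7, ~1 to 9
+    print(metric.distance(np.array([[1., 0.]]), targets=[7, 9]))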
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/jde_matching.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/jde_matching.py
new file mode 100644
index 000000000..e9c40dba4
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/matching/jde_matching.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/matching.py
+"""
+
+import lap
+import scipy
+import numpy as np
+from scipy.spatial.distance import cdist
+from ..motion import kalman_filter
+import warnings
+warnings.filterwarnings("ignore")
+
+__all__ = [
+ 'merge_matches',
+ 'linear_assignment',
+ 'cython_bbox_ious',
+ 'iou_distance',
+ 'embedding_distance',
+ 'fuse_motion',
+]
+
+
+def merge_matches(m1, m2, shape):
+ O, P, Q = shape
+ m1 = np.asarray(m1)
+ m2 = np.asarray(m2)
+
+ M1 = scipy.sparse.coo_matrix(
+ (np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))
+ M2 = scipy.sparse.coo_matrix(
+ (np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))
+
+ mask = M1 * M2
+ match = mask.nonzero()
+ match = list(zip(match[0], match[1]))
+ unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))
+ unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))
+
+ return match, unmatched_O, unmatched_Q
+
+
+def linear_assignment(cost_matrix, thresh):
+ if cost_matrix.size == 0:
+ return np.empty(
+ (0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(
+ range(cost_matrix.shape[1]))
+ matches, unmatched_a, unmatched_b = [], [], []
+ cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
+ for ix, mx in enumerate(x):
+ if mx >= 0:
+ matches.append([ix, mx])
+ unmatched_a = np.where(x < 0)[0]
+ unmatched_b = np.where(y < 0)[0]
+ matches = np.asarray(matches)
+ return matches, unmatched_a, unmatched_b
+
+
+def cython_bbox_ious(atlbrs, btlbrs):
+    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float64)
+ if ious.size == 0:
+ return ious
+ try:
+ import cython_bbox
+ except Exception as e:
+        print('cython_bbox not found, please install cython_bbox, '
+              'for example: `pip install cython_bbox`.')
+ raise e
+
+    ious = cython_bbox.bbox_overlaps(
+        np.ascontiguousarray(atlbrs, dtype=np.float64),
+        np.ascontiguousarray(btlbrs, dtype=np.float64))
+ return ious
+
+
+def iou_distance(atracks, btracks):
+ """
+ Compute cost based on IoU between two list[STrack].
+ """
+ if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or (
+ len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
+ atlbrs = atracks
+ btlbrs = btracks
+ else:
+ atlbrs = [track.tlbr for track in atracks]
+ btlbrs = [track.tlbr for track in btracks]
+ _ious = cython_bbox_ious(atlbrs, btlbrs)
+ cost_matrix = 1 - _ious
+
+ return cost_matrix
+
+
+def embedding_distance(tracks, detections, metric='euclidean'):
+ """
+ Compute cost based on features between two list[STrack].
+ """
+    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float64)
+    if cost_matrix.size == 0:
+        return cost_matrix
+    det_features = np.asarray(
+        [track.curr_feat for track in detections], dtype=np.float64)
+    track_features = np.asarray(
+        [track.smooth_feat for track in tracks], dtype=np.float64)
+    cost_matrix = np.maximum(0.0, cdist(track_features, det_features,
+                                        metric))  # normalized features
+ return cost_matrix
+
+
+def fuse_motion(kf,
+ cost_matrix,
+ tracks,
+ detections,
+ only_position=False,
+ lambda_=0.98):
+ if cost_matrix.size == 0:
+ return cost_matrix
+ gating_dim = 2 if only_position else 4
+ gating_threshold = kalman_filter.chi2inv95[gating_dim]
+ measurements = np.asarray([det.to_xyah() for det in detections])
+ for row, track in enumerate(tracks):
+ gating_distance = kf.gating_distance(
+ track.mean,
+ track.covariance,
+ measurements,
+ only_position,
+ metric='maha')
+ cost_matrix[row, gating_distance > gating_threshold] = np.inf
+ cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_
+ ) * gating_distance
+ return cost_matrix
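+
+
+# Illustrative sketch of linear_assignment() gating, with a made-up 2x2 cost
+# matrix (requires the optional `lap` dependency imported above): pairs whose
+# cost exceeds `thresh` are left unmatched instead of force-assigned.
+if __name__ == '__main__':
+    cost = np.asarray([[0.1, 0.9], [0.8, 0.2]])
+    print(linear_assignment(cost, thresh=0.5))   # matches [[0 0], [1 1]]
+    # with a tighter gate, track 1's best cost (0.2) is rejected
+    print(linear_assignment(cost, thresh=0.15))  # only [0, 0] survives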
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/__init__.py
new file mode 100644
index 000000000..e42dd0b01
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import kalman_filter
+
+from .kalman_filter import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..710870d0f
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/__pycache__/kalman_filter.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/__pycache__/kalman_filter.cpython-37.pyc
new file mode 100644
index 000000000..6df67d811
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/__pycache__/kalman_filter.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/kalman_filter.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/kalman_filter.py
new file mode 100644
index 000000000..e3d42ea14
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/motion/kalman_filter.py
@@ -0,0 +1,270 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/nwojke/deep_sort/blob/master/deep_sort/kalman_filter.py
+"""
+
+import numpy as np
+import scipy.linalg
+from ppdet.core.workspace import register, serializable
+
+__all__ = ['KalmanFilter']
+"""
+Table for the 0.95 quantile of the chi-square distribution with N degrees of
+freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
+function and used as Mahalanobis gating threshold.
+"""
+
+chi2inv95 = {
+ 1: 3.8415,
+ 2: 5.9915,
+ 3: 7.8147,
+ 4: 9.4877,
+ 5: 11.070,
+ 6: 12.592,
+ 7: 14.067,
+ 8: 15.507,
+ 9: 16.919
+}
+
+
+@register
+@serializable
+class KalmanFilter(object):
+ """
+ A simple Kalman filter for tracking bounding boxes in image space.
+
+ The 8-dimensional state space
+
+ x, y, a, h, vx, vy, va, vh
+
+ contains the bounding box center position (x, y), aspect ratio a, height h,
+ and their respective velocities.
+
+ Object motion follows a constant velocity model. The bounding box location
+ (x, y, a, h) is taken as direct observation of the state space (linear
+ observation model).
+
+ """
+
+ def __init__(self):
+ ndim, dt = 4, 1.
+
+ # Create Kalman filter model matrices.
+ self._motion_mat = np.eye(2 * ndim, 2 * ndim)
+ for i in range(ndim):
+ self._motion_mat[i, ndim + i] = dt
+ self._update_mat = np.eye(ndim, 2 * ndim)
+
+ # Motion and observation uncertainty are chosen relative to the current
+ # state estimate. These weights control the amount of uncertainty in
+ # the model. This is a bit hacky.
+ self._std_weight_position = 1. / 20
+ self._std_weight_velocity = 1. / 160
+
+ def initiate(self, measurement):
+ """
+ Create track from unassociated measurement.
+
+ Args:
+ measurement (ndarray): Bounding box coordinates (x, y, a, h) with
+ center position (x, y), aspect ratio a, and height h.
+
+ Returns:
+ The mean vector (8 dimensional) and covariance matrix (8x8
+ dimensional) of the new track. Unobserved velocities are
+ initialized to 0 mean.
+ """
+ mean_pos = measurement
+ mean_vel = np.zeros_like(mean_pos)
+ mean = np.r_[mean_pos, mean_vel]
+
+ std = [
+ 2 * self._std_weight_position * measurement[3],
+ 2 * self._std_weight_position * measurement[3], 1e-2,
+ 2 * self._std_weight_position * measurement[3],
+ 10 * self._std_weight_velocity * measurement[3],
+ 10 * self._std_weight_velocity * measurement[3], 1e-5,
+ 10 * self._std_weight_velocity * measurement[3]
+ ]
+ covariance = np.diag(np.square(std))
+ return mean, covariance
+
+ def predict(self, mean, covariance):
+ """
+ Run Kalman filter prediction step.
+
+ Args:
+ mean (ndarray): The 8 dimensional mean vector of the object state
+ at the previous time step.
+ covariance (ndarray): The 8x8 dimensional covariance matrix of the
+ object state at the previous time step.
+
+ Returns:
+ The mean vector and covariance matrix of the predicted state.
+ Unobserved velocities are initialized to 0 mean.
+ """
+ std_pos = [
+ self._std_weight_position * mean[3], self._std_weight_position *
+ mean[3], 1e-2, self._std_weight_position * mean[3]
+ ]
+ std_vel = [
+ self._std_weight_velocity * mean[3], self._std_weight_velocity *
+ mean[3], 1e-5, self._std_weight_velocity * mean[3]
+ ]
+ motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
+
+        # for a 1-D state vector this equals np.dot(self._motion_mat, mean)
+ mean = np.dot(mean, self._motion_mat.T)
+ covariance = np.linalg.multi_dot(
+ (self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
+
+ return mean, covariance
+
+ def project(self, mean, covariance):
+ """
+ Project state distribution to measurement space.
+
+        Args:
+ mean (ndarray): The state's mean vector (8 dimensional array).
+ covariance (ndarray): The state's covariance matrix (8x8 dimensional).
+
+ Returns:
+ The projected mean and covariance matrix of the given state estimate.
+ """
+ std = [
+ self._std_weight_position * mean[3], self._std_weight_position *
+ mean[3], 1e-1, self._std_weight_position * mean[3]
+ ]
+ innovation_cov = np.diag(np.square(std))
+
+ mean = np.dot(self._update_mat, mean)
+ covariance = np.linalg.multi_dot((self._update_mat, covariance,
+ self._update_mat.T))
+ return mean, covariance + innovation_cov
+
+ def multi_predict(self, mean, covariance):
+ """
+ Run Kalman filter prediction step (Vectorized version).
+
+ Args:
+ mean (ndarray): The Nx8 dimensional mean matrix of the object states
+ at the previous time step.
+            covariance (ndarray): The Nx8x8 dimensional covariance matrices of the
+ object states at the previous time step.
+
+ Returns:
+ The mean vector and covariance matrix of the predicted state.
+ Unobserved velocities are initialized to 0 mean.
+ """
+ std_pos = [
+ self._std_weight_position * mean[:, 3], self._std_weight_position *
+ mean[:, 3], 1e-2 * np.ones_like(mean[:, 3]),
+ self._std_weight_position * mean[:, 3]
+ ]
+ std_vel = [
+ self._std_weight_velocity * mean[:, 3], self._std_weight_velocity *
+ mean[:, 3], 1e-5 * np.ones_like(mean[:, 3]),
+ self._std_weight_velocity * mean[:, 3]
+ ]
+ sqr = np.square(np.r_[std_pos, std_vel]).T
+
+ motion_cov = []
+ for i in range(len(mean)):
+ motion_cov.append(np.diag(sqr[i]))
+ motion_cov = np.asarray(motion_cov)
+
+ mean = np.dot(mean, self._motion_mat.T)
+ left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
+ covariance = np.dot(left, self._motion_mat.T) + motion_cov
+
+ return mean, covariance
+
+ def update(self, mean, covariance, measurement):
+ """
+ Run Kalman filter correction step.
+
+ Args:
+ mean (ndarray): The predicted state's mean vector (8 dimensional).
+ covariance (ndarray): The state's covariance matrix (8x8 dimensional).
+ measurement (ndarray): The 4 dimensional measurement vector
+ (x, y, a, h), where (x, y) is the center position, a the aspect
+ ratio, and h the height of the bounding box.
+
+ Returns:
+ The measurement-corrected state distribution.
+ """
+ projected_mean, projected_cov = self.project(mean, covariance)
+
+ chol_factor, lower = scipy.linalg.cho_factor(
+ projected_cov, lower=True, check_finite=False)
+ kalman_gain = scipy.linalg.cho_solve(
+ (chol_factor, lower),
+ np.dot(covariance, self._update_mat.T).T,
+ check_finite=False).T
+ innovation = measurement - projected_mean
+
+ new_mean = mean + np.dot(innovation, kalman_gain.T)
+ new_covariance = covariance - np.linalg.multi_dot(
+ (kalman_gain, projected_cov, kalman_gain.T))
+ return new_mean, new_covariance
+
+ def gating_distance(self,
+ mean,
+ covariance,
+ measurements,
+ only_position=False,
+ metric='maha'):
+ """
+ Compute gating distance between state distribution and measurements.
+ A suitable distance threshold can be obtained from `chi2inv95`. If
+ `only_position` is False, the chi-square distribution has 4 degrees of
+ freedom, otherwise 2.
+
+ Args:
+ mean (ndarray): Mean vector over the state distribution (8
+ dimensional).
+ covariance (ndarray): Covariance of the state distribution (8x8
+ dimensional).
+ measurements (ndarray): An Nx4 dimensional matrix of N measurements,
+ each in format (x, y, a, h) where (x, y) is the bounding box center
+ position, a the aspect ratio, and h the height.
+ only_position (Optional[bool]): If True, distance computation is
+ done with respect to the bounding box center position only.
+ metric (str): Metric type, 'gaussian' or 'maha'.
+
+        Returns:
+ An array of length N, where the i-th element contains the squared
+ Mahalanobis distance between (mean, covariance) and `measurements[i]`.
+ """
+ mean, covariance = self.project(mean, covariance)
+ if only_position:
+ mean, covariance = mean[:2], covariance[:2, :2]
+ measurements = measurements[:, :2]
+
+ d = measurements - mean
+ if metric == 'gaussian':
+ return np.sum(d * d, axis=1)
+ elif metric == 'maha':
+ cholesky_factor = np.linalg.cholesky(covariance)
+ z = scipy.linalg.solve_triangular(
+ cholesky_factor,
+ d.T,
+ lower=True,
+ check_finite=False,
+ overwrite_b=True)
+ squared_maha = np.sum(z * z, axis=0)
+ return squared_maha
+ else:
+ raise ValueError('invalid distance metric')
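+
+
+# Illustrative predict/update cycle on a made-up (x, y, a, h) measurement,
+# followed by a gating check against the 95% chi-square threshold that the
+# matching code relies on.
+if __name__ == '__main__':
+    kf = KalmanFilter()
+    mean, cov = kf.initiate(np.array([50., 40., 0.5, 80.]))
+    mean, cov = kf.predict(mean, cov)
+    mean, cov = kf.update(mean, cov, np.array([51., 41., 0.5, 80.]))
+    d2 = kf.gating_distance(mean, cov, np.array([[51., 41., 0.5, 80.]]))
+    print(d2, d2 < chi2inv95[4])  # the measurement falls inside the gate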
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__init__.py
new file mode 100644
index 000000000..b74593b41
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import base_jde_tracker
+from . import base_sde_tracker
+from . import jde_tracker
+from . import deepsort_tracker
+
+from .base_jde_tracker import *
+from .base_sde_tracker import *
+from .jde_tracker import *
+from .deepsort_tracker import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..23093785a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/base_jde_tracker.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/base_jde_tracker.cpython-37.pyc
new file mode 100644
index 000000000..9faefa723
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/base_jde_tracker.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/base_sde_tracker.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/base_sde_tracker.cpython-37.pyc
new file mode 100644
index 000000000..7a16be8f4
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/base_sde_tracker.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/deepsort_tracker.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/deepsort_tracker.cpython-37.pyc
new file mode 100644
index 000000000..ab2dffa68
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/deepsort_tracker.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/jde_tracker.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/jde_tracker.cpython-37.pyc
new file mode 100644
index 000000000..8c3fe9ad6
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/__pycache__/jde_tracker.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/base_jde_tracker.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/base_jde_tracker.py
new file mode 100644
index 000000000..8e2ef38bc
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/base_jde_tracker.py
@@ -0,0 +1,297 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/multitracker.py
+"""
+
+import numpy as np
+from collections import defaultdict
+from collections import deque, OrderedDict
+from ..matching import jde_matching as matching
+from ppdet.core.workspace import register, serializable
+import warnings
+warnings.filterwarnings("ignore")
+
+__all__ = [
+ 'TrackState',
+ 'BaseTrack',
+ 'STrack',
+ 'joint_stracks',
+ 'sub_stracks',
+ 'remove_duplicate_stracks',
+]
+
+
+class TrackState(object):
+ New = 0
+ Tracked = 1
+ Lost = 2
+ Removed = 3
+
+
+@register
+@serializable
+class BaseTrack(object):
+ _count_dict = defaultdict(int) # support single class and multi classes
+
+ track_id = 0
+ is_activated = False
+ state = TrackState.New
+
+ history = OrderedDict()
+ features = []
+ curr_feature = None
+ score = 0
+ start_frame = 0
+ frame_id = 0
+ time_since_update = 0
+
+ # multi-camera
+ location = (np.inf, np.inf)
+
+ @property
+ def end_frame(self):
+ return self.frame_id
+
+ @staticmethod
+ def next_id(cls_id):
+ BaseTrack._count_dict[cls_id] += 1
+ return BaseTrack._count_dict[cls_id]
+
+ # @even: reset track id
+ @staticmethod
+ def init_count(num_classes):
+ """
+        Initiate _count_dict for all object classes.
+
+        Args:
+            num_classes (int): number of object classes.
+ """
+ for cls_id in range(num_classes):
+ BaseTrack._count_dict[cls_id] = 0
+
+ @staticmethod
+ def reset_track_count(cls_id):
+ BaseTrack._count_dict[cls_id] = 0
+
+ def activate(self, *args):
+ raise NotImplementedError
+
+ def predict(self):
+ raise NotImplementedError
+
+ def update(self, *args, **kwargs):
+ raise NotImplementedError
+
+ def mark_lost(self):
+ self.state = TrackState.Lost
+
+ def mark_removed(self):
+ self.state = TrackState.Removed
+
+
+@register
+@serializable
+class STrack(BaseTrack):
+ def __init__(self,
+ tlwh,
+ score,
+ temp_feat,
+ num_classes,
+ cls_id,
+ buff_size=30):
+ # object class id
+ self.cls_id = cls_id
+ # wait activate
+        self._tlwh = np.asarray(tlwh, dtype=np.float64)
+ self.kalman_filter = None
+ self.mean, self.covariance = None, None
+ self.is_activated = False
+
+ self.score = score
+ self.track_len = 0
+
+        self.smooth_feat = None
+        self.alpha = 0.9
+        # create the instance-level feature cache before the first
+        # update_features() call, so we never append to the class attribute
+        self.features = deque([], maxlen=buff_size)
+        self.update_features(temp_feat)
+
+ def update_features(self, feat):
+ # L2 normalizing
+ feat /= np.linalg.norm(feat)
+ self.curr_feat = feat
+ if self.smooth_feat is None:
+ self.smooth_feat = feat
+ else:
+ self.smooth_feat = self.alpha * self.smooth_feat + (1.0 - self.alpha
+ ) * feat
+ self.features.append(feat)
+ self.smooth_feat /= np.linalg.norm(self.smooth_feat)
+
+ def predict(self):
+ mean_state = self.mean.copy()
+ if self.state != TrackState.Tracked:
+ mean_state[7] = 0
+ self.mean, self.covariance = self.kalman_filter.predict(mean_state,
+ self.covariance)
+
+ @staticmethod
+ def multi_predict(tracks, kalman_filter):
+ if len(tracks) > 0:
+ multi_mean = np.asarray([track.mean.copy() for track in tracks])
+ multi_covariance = np.asarray(
+ [track.covariance for track in tracks])
+ for i, st in enumerate(tracks):
+ if st.state != TrackState.Tracked:
+ multi_mean[i][7] = 0
+ multi_mean, multi_covariance = kalman_filter.multi_predict(
+ multi_mean, multi_covariance)
+ for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
+ tracks[i].mean = mean
+ tracks[i].covariance = cov
+
+ def reset_track_id(self):
+ self.reset_track_count(self.cls_id)
+
+ def activate(self, kalman_filter, frame_id):
+ """Start a new track"""
+ self.kalman_filter = kalman_filter
+ # update track id for the object class
+ self.track_id = self.next_id(self.cls_id)
+ self.mean, self.covariance = self.kalman_filter.initiate(
+ self.tlwh_to_xyah(self._tlwh))
+
+ self.track_len = 0
+ self.state = TrackState.Tracked # set flag 'tracked'
+
+ if frame_id == 1: # to record the first frame's detection result
+ self.is_activated = True
+
+ self.frame_id = frame_id
+ self.start_frame = frame_id
+
+ def re_activate(self, new_track, frame_id, new_id=False):
+ self.mean, self.covariance = self.kalman_filter.update(
+ self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh))
+ self.update_features(new_track.curr_feat)
+ self.track_len = 0
+ self.state = TrackState.Tracked
+ self.is_activated = True
+ self.frame_id = frame_id
+ if new_id: # update track id for the object class
+ self.track_id = self.next_id(self.cls_id)
+
+ def update(self, new_track, frame_id, update_feature=True):
+ self.frame_id = frame_id
+ self.track_len += 1
+
+ new_tlwh = new_track.tlwh
+ self.mean, self.covariance = self.kalman_filter.update(
+ self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
+ self.state = TrackState.Tracked # set flag 'tracked'
+ self.is_activated = True # set flag 'activated'
+
+ self.score = new_track.score
+ if update_feature:
+ self.update_features(new_track.curr_feat)
+
+ @property
+ def tlwh(self):
+ """Get current position in bounding box format `(top left x, top left y,
+ width, height)`.
+ """
+ if self.mean is None:
+ return self._tlwh.copy()
+
+ ret = self.mean[:4].copy()
+ ret[2] *= ret[3]
+ ret[:2] -= ret[2:] / 2
+ return ret
+
+ @property
+ def tlbr(self):
+ """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
+ `(top left, bottom right)`.
+ """
+ ret = self.tlwh.copy()
+ ret[2:] += ret[:2]
+ return ret
+
+ @staticmethod
+ def tlwh_to_xyah(tlwh):
+ """Convert bounding box to format `(center x, center y, aspect ratio,
+ height)`, where the aspect ratio is `width / height`.
+ """
+ ret = np.asarray(tlwh).copy()
+ ret[:2] += ret[2:] / 2
+ ret[2] /= ret[3]
+ return ret
+
+ def to_xyah(self):
+ return self.tlwh_to_xyah(self.tlwh)
+
+ @staticmethod
+ def tlbr_to_tlwh(tlbr):
+ ret = np.asarray(tlbr).copy()
+ ret[2:] -= ret[:2]
+ return ret
+
+ @staticmethod
+ def tlwh_to_tlbr(tlwh):
+ ret = np.asarray(tlwh).copy()
+ ret[2:] += ret[:2]
+ return ret
+
+ def __repr__(self):
+ return 'OT_({}-{})_({}-{})'.format(self.cls_id, self.track_id,
+ self.start_frame, self.end_frame)
+
+
+def joint_stracks(tlista, tlistb):
+ exists = {}
+ res = []
+ for t in tlista:
+ exists[t.track_id] = 1
+ res.append(t)
+ for t in tlistb:
+ tid = t.track_id
+ if not exists.get(tid, 0):
+ exists[tid] = 1
+ res.append(t)
+ return res
+
+
+def sub_stracks(tlista, tlistb):
+ stracks = {}
+ for t in tlista:
+ stracks[t.track_id] = t
+ for t in tlistb:
+ tid = t.track_id
+ if stracks.get(tid, 0):
+ del stracks[tid]
+ return list(stracks.values())
+
+
+def remove_duplicate_stracks(stracksa, stracksb):
+ pdist = matching.iou_distance(stracksa, stracksb)
+ pairs = np.where(pdist < 0.15)
+ dupa, dupb = list(), list()
+ for p, q in zip(*pairs):
+ timep = stracksa[p].frame_id - stracksa[p].start_frame
+ timeq = stracksb[q].frame_id - stracksb[q].start_frame
+ if timep > timeq:
+ dupb.append(q)
+ else:
+ dupa.append(p)
+    resa = [t for i, t in enumerate(stracksa) if i not in dupa]
+    resb = [t for i, t in enumerate(stracksb) if i not in dupb]
+ return resa, resb
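+
+
+# Illustrative round trip through STrack's box-format helpers on a made-up
+# box: tlwh (top-left + size) -> xyah (center, aspect ratio, height) and
+# tlbr (corner) forms used by the matching code above.
+if __name__ == '__main__':
+    tlwh = np.asarray([10., 20., 30., 60.])
+    print(STrack.tlwh_to_xyah(tlwh))  # [25. 50. 0.5 60.]
+    tlbr = STrack.tlwh_to_tlbr(tlwh)  # [10. 20. 40. 80.]
+    assert np.allclose(STrack.tlbr_to_tlwh(tlbr), tlwh)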
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/base_sde_tracker.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/base_sde_tracker.py
new file mode 100644
index 000000000..accc2016f
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/base_sde_tracker.py
@@ -0,0 +1,156 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/nwojke/deep_sort/blob/master/deep_sort/track.py
+"""
+
+import datetime
+from ppdet.core.workspace import register, serializable
+
+__all__ = ['TrackState', 'Track']
+
+
+class TrackState(object):
+ """
+ Enumeration type for the single target track state. Newly created tracks are
+ classified as `tentative` until enough evidence has been collected. Then,
+ the track state is changed to `confirmed`. Tracks that are no longer alive
+ are classified as `deleted` to mark them for removal from the set of active
+ tracks.
+ """
+ Tentative = 1
+ Confirmed = 2
+ Deleted = 3
+
+
+@register
+@serializable
+class Track(object):
+ """
+ A single target track with state space `(x, y, a, h)` and associated
+ velocities, where `(x, y)` is the center of the bounding box, `a` is the
+ aspect ratio and `h` is the height.
+
+ Args:
+ mean (ndarray): Mean vector of the initial state distribution.
+ covariance (ndarray): Covariance matrix of the initial state distribution.
+ track_id (int): A unique track identifier.
+ n_init (int): Number of consecutive detections before the track is confirmed.
+ The track state is set to `Deleted` if a miss occurs within the first
+ `n_init` frames.
+ max_age (int): The maximum number of consecutive misses before the track
+ state is set to `Deleted`.
+ cls_id (int): The category id of the tracked box.
+ score (float): The confidence score of the tracked box.
+ feature (Optional[ndarray]): Feature vector of the detection this track
+ originates from. If not None, this feature is added to the `features` cache.
+
+ Attributes:
+ hits (int): Total number of measurement updates.
+        age (int): Total number of frames since first occurrence.
+ time_since_update (int): Total number of frames since last measurement
+ update.
+ state (TrackState): The current track state.
+ features (List[ndarray]): A cache of features. On each measurement update,
+ the associated feature vector is added to this list.
+ """
+
+ def __init__(self,
+ mean,
+ covariance,
+ track_id,
+ n_init,
+ max_age,
+ cls_id,
+ score,
+ feature=None):
+ self.mean = mean
+ self.covariance = covariance
+ self.track_id = track_id
+ self.hits = 1
+ self.age = 1
+ self.time_since_update = 0
+ self.cls_id = cls_id
+ self.score = score
+ self.start_time = datetime.datetime.now()
+
+ self.state = TrackState.Tentative
+ self.features = []
+ self.feat = feature
+ if feature is not None:
+ self.features.append(feature)
+
+ self._n_init = n_init
+ self._max_age = max_age
+
+ def to_tlwh(self):
+ """Get position in format `(top left x, top left y, width, height)`."""
+ ret = self.mean[:4].copy()
+ ret[2] *= ret[3]
+ ret[:2] -= ret[2:] / 2
+ return ret
+
+ def to_tlbr(self):
+ """Get position in bounding box format `(min x, miny, max x, max y)`."""
+ ret = self.to_tlwh()
+ ret[2:] = ret[:2] + ret[2:]
+ return ret
+
+ def predict(self, kalman_filter):
+ """
+ Propagate the state distribution to the current time step using a Kalman
+ filter prediction step.
+ """
+ self.mean, self.covariance = kalman_filter.predict(self.mean,
+ self.covariance)
+ self.age += 1
+ self.time_since_update += 1
+
+ def update(self, kalman_filter, detection):
+ """
+ Perform Kalman filter measurement update step and update the associated
+ detection feature cache.
+ """
+ self.mean, self.covariance = kalman_filter.update(self.mean,
+ self.covariance,
+ detection.to_xyah())
+ self.features.append(detection.feature)
+ self.feat = detection.feature
+ self.cls_id = detection.cls_id
+ self.score = detection.score
+
+ self.hits += 1
+ self.time_since_update = 0
+ if self.state == TrackState.Tentative and self.hits >= self._n_init:
+ self.state = TrackState.Confirmed
+
+ def mark_missed(self):
+ """Mark this track as missed (no association at the current time step).
+ """
+ if self.state == TrackState.Tentative:
+ self.state = TrackState.Deleted
+ elif self.time_since_update > self._max_age:
+ self.state = TrackState.Deleted
+
+ def is_tentative(self):
+ """Returns True if this track is tentative (unconfirmed)."""
+ return self.state == TrackState.Tentative
+
+ def is_confirmed(self):
+ """Returns True if this track is confirmed."""
+ return self.state == TrackState.Confirmed
+
+ def is_deleted(self):
+ """Returns True if this track is dead and should be deleted."""
+ return self.state == TrackState.Deleted
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/deepsort_tracker.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/deepsort_tracker.py
new file mode 100644
index 000000000..ef38a67f9
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/deepsort_tracker.py
@@ -0,0 +1,188 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/nwojke/deep_sort/blob/master/deep_sort/tracker.py
+"""
+
+import numpy as np
+
+from ..motion import KalmanFilter
+from ..matching.deepsort_matching import NearestNeighborDistanceMetric
+from ..matching.deepsort_matching import iou_cost, min_cost_matching, matching_cascade, gate_cost_matrix
+from .base_sde_tracker import Track
+from ..utils import Detection
+
+from ppdet.core.workspace import register, serializable
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = ['DeepSORTTracker']
+
+
+@register
+@serializable
+class DeepSORTTracker(object):
+ """
+ DeepSORT tracker
+
+ Args:
+        input_size (list): input feature map size to reid model, [w, h] format,
+            [64, 192] as default.
+ min_box_area (int): min box area to filter out low quality boxes
+        vertical_ratio (float): w/h, the vertical ratio of the bbox used to
+            filter bad results, 1.6 by default for pedestrian tracking. If set
+            <=0, no bboxes are filtered.
+ budget (int): If not None, fix samples per class to at most this number.
+ Removes the oldest samples when the budget is reached.
+        max_age (int): maximum number of consecutive misses before a track
+            is deleted.
+        n_init (int): number of frames that a track remains in the
+            initialization phase, i.e. the number of consecutive detections
+            before the track is confirmed. The track state is set to `Deleted`
+            if a miss occurs within the first `n_init` frames.
+ metric_type (str): either "euclidean" or "cosine", the distance metric
+ used for measurement to track association.
+ matching_threshold (float): samples with larger distance are
+ considered an invalid match.
+ max_iou_distance (float): max iou distance threshold
+ motion (object): KalmanFilter instance
+ """
+
+ def __init__(self,
+ input_size=[64, 192],
+ min_box_area=0,
+ vertical_ratio=-1,
+ budget=100,
+ max_age=70,
+ n_init=3,
+ metric_type='cosine',
+ matching_threshold=0.2,
+ max_iou_distance=0.9,
+ motion='KalmanFilter'):
+ self.input_size = input_size
+ self.min_box_area = min_box_area
+ self.vertical_ratio = vertical_ratio
+ self.max_age = max_age
+ self.n_init = n_init
+ self.metric = NearestNeighborDistanceMetric(metric_type,
+ matching_threshold, budget)
+ self.max_iou_distance = max_iou_distance
+ if motion == 'KalmanFilter':
+ self.motion = KalmanFilter()
+
+ self.tracks = []
+ self._next_id = 1
+
+ def predict(self):
+ """
+ Propagate track state distributions one time step forward.
+ This function should be called once every time step, before `update`.
+ """
+ for track in self.tracks:
+ track.predict(self.motion)
+
+ def update(self, pred_dets, pred_embs):
+ """
+ Perform measurement update and track management.
+ Args:
+ pred_dets (np.array): Detection results of the image, the shape is
+ [N, 6], means 'x0, y0, x1, y1, score, cls_id'.
+ pred_embs (np.array): Embedding results of the image, the shape is
+ [N, 128], usually pred_embs.shape[1] is a multiple of 128.
+ """
+ pred_tlwhs = pred_dets[:, :4]
+ pred_scores = pred_dets[:, 4:5]
+ pred_cls_ids = pred_dets[:, 5:]
+
+ detections = [
+ Detection(tlwh, score, feat, cls_id)
+ for tlwh, score, feat, cls_id in zip(pred_tlwhs, pred_scores,
+ pred_embs, pred_cls_ids)
+ ]
+
+ # Run matching cascade.
+ matches, unmatched_tracks, unmatched_detections = \
+ self._match(detections)
+
+ # Update track set.
+ for track_idx, detection_idx in matches:
+ self.tracks[track_idx].update(self.motion,
+ detections[detection_idx])
+ for track_idx in unmatched_tracks:
+ self.tracks[track_idx].mark_missed()
+ for detection_idx in unmatched_detections:
+ self._initiate_track(detections[detection_idx])
+ self.tracks = [t for t in self.tracks if not t.is_deleted()]
+
+ # Update distance metric.
+ active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
+ features, targets = [], []
+ for track in self.tracks:
+ if not track.is_confirmed():
+ continue
+ features += track.features
+ targets += [track.track_id for _ in track.features]
+ track.features = []
+ self.metric.partial_fit(
+ np.asarray(features), np.asarray(targets), active_targets)
+ output_stracks = self.tracks
+ return output_stracks
+
+ def _match(self, detections):
+ def gated_metric(tracks, dets, track_indices, detection_indices):
+ features = np.array([dets[i].feature for i in detection_indices])
+ targets = np.array([tracks[i].track_id for i in track_indices])
+ cost_matrix = self.metric.distance(features, targets)
+ cost_matrix = gate_cost_matrix(self.motion, cost_matrix, tracks,
+ dets, track_indices,
+ detection_indices)
+ return cost_matrix
+
+ # Split track set into confirmed and unconfirmed tracks.
+ confirmed_tracks = [
+ i for i, t in enumerate(self.tracks) if t.is_confirmed()
+ ]
+ unconfirmed_tracks = [
+ i for i, t in enumerate(self.tracks) if not t.is_confirmed()
+ ]
+
+ # Associate confirmed tracks using appearance features.
+ matches_a, unmatched_tracks_a, unmatched_detections = \
+ matching_cascade(
+ gated_metric, self.metric.matching_threshold, self.max_age,
+ self.tracks, detections, confirmed_tracks)
+
+ # Associate remaining tracks together with unconfirmed tracks using IOU.
+ iou_track_candidates = unconfirmed_tracks + [
+ k for k in unmatched_tracks_a
+ if self.tracks[k].time_since_update == 1
+ ]
+ unmatched_tracks_a = [
+ k for k in unmatched_tracks_a
+ if self.tracks[k].time_since_update != 1
+ ]
+ matches_b, unmatched_tracks_b, unmatched_detections = \
+ min_cost_matching(
+ iou_cost, self.max_iou_distance, self.tracks,
+ detections, iou_track_candidates, unmatched_detections)
+
+ matches = matches_a + matches_b
+ unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
+ return matches, unmatched_tracks, unmatched_detections
+
+ def _initiate_track(self, detection):
+ mean, covariance = self.motion.initiate(detection.to_xyah())
+ self.tracks.append(
+ Track(mean, covariance, self._next_id, self.n_init, self.max_age,
+ detection.cls_id, detection.score, detection.feature))
+ self._next_id += 1
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/jde_tracker.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/jde_tracker.py
new file mode 100644
index 000000000..af5411a26
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/tracker/jde_tracker.py
@@ -0,0 +1,273 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This code is based on https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/multitracker.py
+"""
+
+import numpy as np
+from collections import defaultdict
+
+from ..matching import jde_matching as matching
+from ..motion import KalmanFilter
+from .base_jde_tracker import TrackState, STrack
+from .base_jde_tracker import joint_stracks, sub_stracks, remove_duplicate_stracks
+
+from ppdet.core.workspace import register, serializable
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = ['JDETracker']
+
+
+@register
+@serializable
+class JDETracker(object):
+ __shared__ = ['num_classes']
+ """
+ JDE tracker, support single class and multi classes
+
+ Args:
+ num_classes (int): the number of classes
+ det_thresh (float): threshold of detection score
+ track_buffer (int): buffer for tracker
+ min_box_area (int): min box area to filter out low quality boxes
+        vertical_ratio (float): w/h, the vertical ratio of the bbox used to
+            filter bad results. If set <0, no bboxes are filtered; usually set
+            to 1.6 for pedestrian tracking.
+ tracked_thresh (float): linear assignment threshold of tracked
+ stracks and detections
+ r_tracked_thresh (float): linear assignment threshold of
+ tracked stracks and unmatched detections
+ unconfirmed_thresh (float): linear assignment threshold of
+ unconfirmed stracks and unmatched detections
+ motion (str): motion model, KalmanFilter as default
+ conf_thres (float): confidence threshold for tracking
+ metric_type (str): either "euclidean" or "cosine", the distance metric
+ used for measurement to track association.
+ """
+
+ def __init__(self,
+ num_classes=1,
+ det_thresh=0.3,
+ track_buffer=30,
+ min_box_area=200,
+ vertical_ratio=1.6,
+ tracked_thresh=0.7,
+ r_tracked_thresh=0.5,
+ unconfirmed_thresh=0.7,
+ motion='KalmanFilter',
+ conf_thres=0,
+ metric_type='euclidean'):
+ self.num_classes = num_classes
+ self.det_thresh = det_thresh
+ self.track_buffer = track_buffer
+ self.min_box_area = min_box_area
+ self.vertical_ratio = vertical_ratio
+
+ self.tracked_thresh = tracked_thresh
+ self.r_tracked_thresh = r_tracked_thresh
+ self.unconfirmed_thresh = unconfirmed_thresh
+ if motion == 'KalmanFilter':
+ self.motion = KalmanFilter()
+ self.conf_thres = conf_thres
+ self.metric_type = metric_type
+
+ self.frame_id = 0
+ self.tracked_tracks_dict = defaultdict(list) # dict(list[STrack])
+ self.lost_tracks_dict = defaultdict(list) # dict(list[STrack])
+ self.removed_tracks_dict = defaultdict(list) # dict(list[STrack])
+
+ self.max_time_lost = 0
+ # max_time_lost will be calculated: int(frame_rate / 30.0 * track_buffer)
+
+ def update(self, pred_dets, pred_embs):
+ """
+        Processes the image frame and finds bounding boxes (detections).
+        Associates the detections with corresponding tracklets and also handles
+ lost, removed, refound and active tracklets.
+
+ Args:
+ pred_dets (np.array): Detection results of the image, the shape is
+ [N, 6], means 'x0, y0, x1, y1, score, cls_id'.
+ pred_embs (np.array): Embedding results of the image, the shape is
+ [N, 128] or [N, 512].
+
+ Return:
+            output_tracks_dict (dict(list)): Each list contains information
+                about the online tracklets for the received image tensor.
+ """
+ self.frame_id += 1
+ if self.frame_id == 1:
+ STrack.init_count(self.num_classes)
+ activated_tracks_dict = defaultdict(list)
+ refined_tracks_dict = defaultdict(list)
+ lost_tracks_dict = defaultdict(list)
+ removed_tracks_dict = defaultdict(list)
+ output_tracks_dict = defaultdict(list)
+
+ pred_dets_dict = defaultdict(list)
+ pred_embs_dict = defaultdict(list)
+
+ # unify single and multi classes detection and embedding results
+ for cls_id in range(self.num_classes):
+ cls_idx = (pred_dets[:, 5:] == cls_id).squeeze(-1)
+ pred_dets_dict[cls_id] = pred_dets[cls_idx]
+ pred_embs_dict[cls_id] = pred_embs[cls_idx]
+
+ for cls_id in range(self.num_classes):
+ """ Step 1: Get detections by class"""
+ pred_dets_cls = pred_dets_dict[cls_id]
+ pred_embs_cls = pred_embs_dict[cls_id]
+ remain_inds = (pred_dets_cls[:, 4:5] > self.conf_thres).squeeze(-1)
+ if remain_inds.sum() > 0:
+ pred_dets_cls = pred_dets_cls[remain_inds]
+ pred_embs_cls = pred_embs_cls[remain_inds]
+ detections = [
+ STrack(
+ STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f,
+ self.num_classes, cls_id, 30)
+ for (tlbrs, f) in zip(pred_dets_cls, pred_embs_cls)
+ ]
+ else:
+ detections = []
+ ''' Add newly detected tracklets to tracked_stracks'''
+ unconfirmed_dict = defaultdict(list)
+ tracked_tracks_dict = defaultdict(list)
+ for track in self.tracked_tracks_dict[cls_id]:
+ if not track.is_activated:
+ # previous tracks which are not active in the current frame are added in unconfirmed list
+ unconfirmed_dict[cls_id].append(track)
+ else:
+ # Active tracks are added to the local list 'tracked_stracks'
+ tracked_tracks_dict[cls_id].append(track)
+ """ Step 2: First association, with embedding"""
+ # building tracking pool for the current frame
+ track_pool_dict = defaultdict(list)
+ track_pool_dict[cls_id] = joint_stracks(
+ tracked_tracks_dict[cls_id], self.lost_tracks_dict[cls_id])
+
+ # Predict the current location with KalmanFilter
+ STrack.multi_predict(track_pool_dict[cls_id], self.motion)
+
+ dists = matching.embedding_distance(
+ track_pool_dict[cls_id], detections, metric=self.metric_type)
+ dists = matching.fuse_motion(self.motion, dists,
+ track_pool_dict[cls_id], detections)
+ matches, u_track, u_detection = matching.linear_assignment(
+ dists, thresh=self.tracked_thresh)
+
+ for i_tracked, idet in matches:
+                # i_tracked is the index of the track and idet is the index of the detection
+ track = track_pool_dict[cls_id][i_tracked]
+ det = detections[idet]
+ if track.state == TrackState.Tracked:
+ # If the track is active, add the detection to the track
+ track.update(detections[idet], self.frame_id)
+ activated_tracks_dict[cls_id].append(track)
+ else:
+ # We have obtained a detection from a track which is not active,
+ # hence put the track in refind_stracks list
+ track.re_activate(det, self.frame_id, new_id=False)
+ refined_tracks_dict[cls_id].append(track)
+
+ # None of the steps below happen if there are no undetected tracks.
+ """ Step 3: Second association, with IOU"""
+ detections = [detections[i] for i in u_detection]
+ r_tracked_stracks = []
+ for i in u_track:
+ if track_pool_dict[cls_id][i].state == TrackState.Tracked:
+ r_tracked_stracks.append(track_pool_dict[cls_id][i])
+
+ dists = matching.iou_distance(r_tracked_stracks, detections)
+ matches, u_track, u_detection = matching.linear_assignment(
+ dists, thresh=self.r_tracked_thresh)
+
+ for i_tracked, idet in matches:
+ track = r_tracked_stracks[i_tracked]
+ det = detections[idet]
+ if track.state == TrackState.Tracked:
+ track.update(det, self.frame_id)
+ activated_tracks_dict[cls_id].append(track)
+ else:
+ track.re_activate(det, self.frame_id, new_id=False)
+ refined_tracks_dict[cls_id].append(track)
+
+ for it in u_track:
+ track = r_tracked_stracks[it]
+ if not track.state == TrackState.Lost:
+ track.mark_lost()
+ lost_tracks_dict[cls_id].append(track)
+ '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
+ detections = [detections[i] for i in u_detection]
+ dists = matching.iou_distance(unconfirmed_dict[cls_id], detections)
+ matches, u_unconfirmed, u_detection = matching.linear_assignment(
+ dists, thresh=self.unconfirmed_thresh)
+ for i_tracked, idet in matches:
+ unconfirmed_dict[cls_id][i_tracked].update(detections[idet],
+ self.frame_id)
+ activated_tracks_dict[cls_id].append(unconfirmed_dict[cls_id][
+ i_tracked])
+ for it in u_unconfirmed:
+ track = unconfirmed_dict[cls_id][it]
+ track.mark_removed()
+ removed_tracks_dict[cls_id].append(track)
+ """ Step 4: Init new stracks"""
+ for inew in u_detection:
+ track = detections[inew]
+ if track.score < self.det_thresh:
+ continue
+ track.activate(self.motion, self.frame_id)
+ activated_tracks_dict[cls_id].append(track)
+ """ Step 5: Update state"""
+ for track in self.lost_tracks_dict[cls_id]:
+ if self.frame_id - track.end_frame > self.max_time_lost:
+ track.mark_removed()
+ removed_tracks_dict[cls_id].append(track)
+
+ self.tracked_tracks_dict[cls_id] = [
+ t for t in self.tracked_tracks_dict[cls_id]
+ if t.state == TrackState.Tracked
+ ]
+ self.tracked_tracks_dict[cls_id] = joint_stracks(
+ self.tracked_tracks_dict[cls_id], activated_tracks_dict[cls_id])
+ self.tracked_tracks_dict[cls_id] = joint_stracks(
+ self.tracked_tracks_dict[cls_id], refined_tracks_dict[cls_id])
+ self.lost_tracks_dict[cls_id] = sub_stracks(
+ self.lost_tracks_dict[cls_id], self.tracked_tracks_dict[cls_id])
+ self.lost_tracks_dict[cls_id].extend(lost_tracks_dict[cls_id])
+ self.lost_tracks_dict[cls_id] = sub_stracks(
+ self.lost_tracks_dict[cls_id], self.removed_tracks_dict[cls_id])
+ self.removed_tracks_dict[cls_id].extend(removed_tracks_dict[cls_id])
+ self.tracked_tracks_dict[cls_id], self.lost_tracks_dict[
+ cls_id] = remove_duplicate_stracks(
+ self.tracked_tracks_dict[cls_id],
+ self.lost_tracks_dict[cls_id])
+
+            # collect the activated tracks for output
+ output_tracks_dict[cls_id] = [
+ track for track in self.tracked_tracks_dict[cls_id]
+ if track.is_activated
+ ]
+
+ logger.debug('===========Frame {}=========='.format(self.frame_id))
+ logger.debug('Activated: {}'.format(
+ [track.track_id for track in activated_tracks_dict[cls_id]]))
+ logger.debug('Refind: {}'.format(
+ [track.track_id for track in refined_tracks_dict[cls_id]]))
+ logger.debug('Lost: {}'.format(
+ [track.track_id for track in lost_tracks_dict[cls_id]]))
+ logger.debug('Removed: {}'.format(
+ [track.track_id for track in removed_tracks_dict[cls_id]]))
+
+ return output_tracks_dict
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/utils.py
new file mode 100644
index 000000000..b3657d257
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/utils.py
@@ -0,0 +1,262 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import cv2
+import time
+import numpy as np
+from .visualization import plot_tracking_dict, plot_tracking
+
+__all__ = [
+ 'MOTTimer',
+ 'Detection',
+ 'write_mot_results',
+ 'save_vis_results',
+ 'load_det_results',
+ 'preprocess_reid',
+ 'get_crops',
+ 'clip_box',
+ 'scale_coords',
+]
+
+
+class MOTTimer(object):
+ """
+    This class is used to compute and print the current FPS during evaluation.
+ """
+
+ def __init__(self):
+ self.total_time = 0.
+ self.calls = 0
+ self.start_time = 0.
+ self.diff = 0.
+ self.average_time = 0.
+ self.duration = 0.
+
+ def tic(self):
+        # use time.time instead of time.clock because time.clock
+        # does not normalize for multithreading
+ self.start_time = time.time()
+
+ def toc(self, average=True):
+ self.diff = time.time() - self.start_time
+ self.total_time += self.diff
+ self.calls += 1
+ self.average_time = self.total_time / self.calls
+ if average:
+ self.duration = self.average_time
+ else:
+ self.duration = self.diff
+ return self.duration
+
+ def clear(self):
+ self.total_time = 0.
+ self.calls = 0
+ self.start_time = 0.
+ self.diff = 0.
+ self.average_time = 0.
+ self.duration = 0.
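+
+    # Illustrative usage: timer.tic(); ...one tracking step...; then
+    # fps = 1. / timer.toc(average=True).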
+
+
+class Detection(object):
+ """
+ This class represents a bounding box detection in a single image.
+
+ Args:
+ tlwh (Tensor): Bounding box in format `(top left x, top left y,
+ width, height)`.
+ score (Tensor): Bounding box confidence score.
+ feature (Tensor): A feature vector that describes the object
+ contained in this image.
+ cls_id (Tensor): Bounding box category id.
+ """
+
+ def __init__(self, tlwh, score, feature, cls_id):
+ self.tlwh = np.asarray(tlwh, dtype=np.float32)
+ self.score = float(score)
+ self.feature = np.asarray(feature, dtype=np.float32)
+ self.cls_id = int(cls_id)
+
+ def to_tlbr(self):
+ """
+ Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
+ `(top left, bottom right)`.
+ """
+ ret = self.tlwh.copy()
+ ret[2:] += ret[:2]
+ return ret
+
+ def to_xyah(self):
+ """
+ Convert bounding box to format `(center x, center y, aspect ratio,
+ height)`, where the aspect ratio is `width / height`.
+ """
+ ret = self.tlwh.copy()
+ ret[:2] += ret[2:] / 2
+ ret[2] /= ret[3]
+ return ret
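+
+    # Illustrative example (values assumed): tlwh=[45., 30., 10., 20.] gives
+    # to_tlbr() -> [45., 30., 55., 50.] and to_xyah() -> [50., 40., 0.5, 20.].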
+
+
+def write_mot_results(filename, results, data_type='mot', num_classes=1):
+ # support single and multi classes
+ if data_type in ['mot', 'mcmot']:
+ save_format = '{frame},{id},{x1},{y1},{w},{h},{score},{cls_id},-1,-1\n'
+ elif data_type == 'kitti':
+ save_format = '{frame} {id} car 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
+ else:
+ raise ValueError(data_type)
+
+ f = open(filename, 'w')
+ for cls_id in range(num_classes):
+ for frame_id, tlwhs, tscores, track_ids in results[cls_id]:
+ if data_type == 'kitti':
+ frame_id -= 1
+ for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):
+ if track_id < 0: continue
+ if data_type == 'mot':
+ cls_id = -1
+
+ x1, y1, w, h = tlwh
+ x2, y2 = x1 + w, y1 + h
+ line = save_format.format(
+ frame=frame_id,
+ id=track_id,
+ x1=x1,
+ y1=y1,
+ x2=x2,
+ y2=y2,
+ w=w,
+ h=h,
+ score=score,
+ cls_id=cls_id)
+ f.write(line)
+    f.close()
+    print('MOT results saved in {}'.format(filename))
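+
+# Illustrative 'mot' output line, following the save_format above
+# ('{frame},{id},{x1},{y1},{w},{h},{score},{cls_id},-1,-1'):
+#   1,1,45.0,30.0,10.0,20.0,0.9,-1,-1,-1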
+
+
+def save_vis_results(data,
+ frame_id,
+ online_ids,
+ online_tlwhs,
+ online_scores,
+ average_time,
+ show_image,
+ save_dir,
+ num_classes=1):
+ if show_image or save_dir is not None:
+ assert 'ori_image' in data
+ img0 = data['ori_image'].numpy()[0]
+ if online_ids is None:
+ online_im = img0
+ else:
+ if isinstance(online_tlwhs, dict):
+ online_im = plot_tracking_dict(
+ img0,
+ num_classes,
+ online_tlwhs,
+ online_ids,
+ online_scores,
+ frame_id=frame_id,
+ fps=1. / average_time)
+ else:
+ online_im = plot_tracking(
+ img0,
+ online_tlwhs,
+ online_ids,
+ online_scores,
+ frame_id=frame_id,
+ fps=1. / average_time)
+ if show_image:
+ cv2.imshow('online_im', online_im)
+ if save_dir is not None:
+ cv2.imwrite(
+ os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
+
+
+def load_det_results(det_file, num_frames):
+ assert os.path.exists(det_file) and os.path.isfile(det_file), \
+        '{} does not exist or is not a file.'.format(det_file)
+ labels = np.loadtxt(det_file, dtype='float32', delimiter=',')
+ assert labels.shape[1] == 7, \
+ "Each line of {} should have 7 items: '[frame_id],[x0],[y0],[w],[h],[score],[class_id]'.".format(det_file)
+ results_list = []
+ for frame_i in range(num_frames):
+ results = {'bbox': [], 'score': [], 'cls_id': []}
+        labels_with_frame = labels[labels[:, 0] == frame_i + 1]
+        # each line of labels_with_frame:
+        # [frame_id],[x0],[y0],[w],[h],[score],[class_id]
+        for l in labels_with_frame:
+ results['bbox'].append(l[1:5])
+ results['score'].append(l[5:6])
+ results['cls_id'].append(l[6:7])
+ results_list.append(results)
+ return results_list
+
+
+def scale_coords(coords, input_shape, im_shape, scale_factor):
+ # Note: ratio has only one value, scale_factor[0] == scale_factor[1]
+ #
+    # This function is only used for JDE YOLOv3 or other detectors with
+    # LetterBoxResize and JDEBBoxPostProcess, whose output coords have not
+    # been scaled back to the original image.
+
+ ratio = scale_factor[0]
+ pad_w = (input_shape[1] - int(im_shape[1])) / 2
+ pad_h = (input_shape[0] - int(im_shape[0])) / 2
+ coords[:, 0::2] -= pad_w
+ coords[:, 1::2] -= pad_h
+ coords[:, 0:4] /= ratio
+ coords[:, :4] = np.clip(coords[:, :4], a_min=0, a_max=coords[:, :4].max())
+ return coords.round()
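+
+# Illustrative example (values assumed): with input_shape=(320, 576),
+# im_shape=(320, 512) and ratio=0.5, pad_w = (576 - 512) / 2 = 32, so an
+# x-coordinate of 132 maps back to (132 - 32) / 0.5 = 200 on the original image.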
+
+
+def clip_box(xyxy, ori_image_shape):
+ H, W = ori_image_shape
+ xyxy[:, 0::2] = np.clip(xyxy[:, 0::2], a_min=0, a_max=W)
+ xyxy[:, 1::2] = np.clip(xyxy[:, 1::2], a_min=0, a_max=H)
+ w = xyxy[:, 2:3] - xyxy[:, 0:1]
+ h = xyxy[:, 3:4] - xyxy[:, 1:2]
+ mask = np.logical_and(h > 0, w > 0)
+ keep_idx = np.nonzero(mask)
+ return xyxy[keep_idx[0]], keep_idx
+
+
+def get_crops(xyxy, ori_img, w, h):
+ crops = []
+ xyxy = xyxy.astype(np.int64)
+ ori_img = ori_img.numpy()
+ ori_img = np.squeeze(ori_img, axis=0).transpose(1, 0, 2) # [h,w,3]->[w,h,3]
+ for i, bbox in enumerate(xyxy):
+ crop = ori_img[bbox[0]:bbox[2], bbox[1]:bbox[3], :]
+ crops.append(crop)
+ crops = preprocess_reid(crops, w, h)
+ return crops
+
+
+def preprocess_reid(imgs,
+ w=64,
+ h=192,
+ mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225]):
+ im_batch = []
+ for img in imgs:
+ img = cv2.resize(img, (w, h))
+ img = img[:, :, ::-1].astype('float32').transpose((2, 0, 1)) / 255
+ img_mean = np.array(mean).reshape((3, 1, 1))
+ img_std = np.array(std).reshape((3, 1, 1))
+ img -= img_mean
+ img /= img_std
+ img = np.expand_dims(img, axis=0)
+ im_batch.append(img)
+ im_batch = np.concatenate(im_batch, 0)
+ return im_batch
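+
+# The returned batch has shape [N, 3, h, w], normalized with the ImageNet
+# mean/std used above.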
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/visualization.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/visualization.py
new file mode 100644
index 000000000..6d13a2877
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/mot/visualization.py
@@ -0,0 +1,146 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cv2
+import numpy as np
+
+
+def get_color(idx):
+ idx = idx * 3
+ color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)
+ return color
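+
+# e.g. get_color(1) == (111, 51, 87); each track id maps to a stable color.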
+
+
+def plot_tracking(image,
+ tlwhs,
+ obj_ids,
+ scores=None,
+ frame_id=0,
+ fps=0.,
+ ids2names=[]):
+ im = np.ascontiguousarray(np.copy(image))
+ im_h, im_w = im.shape[:2]
+
+ top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255
+
+ text_scale = max(1, image.shape[1] / 1600.)
+ text_thickness = 2
+ line_thickness = max(1, int(image.shape[1] / 500.))
+
+ radius = max(5, int(im_w / 140.))
+ cv2.putText(
+ im,
+ 'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),
+ (0, int(15 * text_scale)),
+ cv2.FONT_HERSHEY_PLAIN,
+ text_scale, (0, 0, 255),
+ thickness=2)
+
+ for i, tlwh in enumerate(tlwhs):
+ x1, y1, w, h = tlwh
+ intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
+ obj_id = int(obj_ids[i])
+ id_text = '{}'.format(int(obj_id))
+ if ids2names != []:
+ assert len(
+ ids2names) == 1, "plot_tracking only supports single classes."
+ id_text = '{}_'.format(ids2names[0]) + id_text
+ _line_thickness = 1 if obj_id <= 0 else line_thickness
+ color = get_color(abs(obj_id))
+ cv2.rectangle(
+ im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)
+ cv2.putText(
+ im,
+ id_text, (intbox[0], intbox[1] - 10),
+ cv2.FONT_HERSHEY_PLAIN,
+ text_scale, (0, 0, 255),
+ thickness=text_thickness)
+
+ if scores is not None:
+ text = '{:.2f}'.format(float(scores[i]))
+ cv2.putText(
+ im,
+ text, (intbox[0], intbox[1] + 10),
+ cv2.FONT_HERSHEY_PLAIN,
+ text_scale, (0, 255, 255),
+ thickness=text_thickness)
+ return im
+
+
+def plot_tracking_dict(image,
+ num_classes,
+ tlwhs_dict,
+ obj_ids_dict,
+ scores_dict,
+ frame_id=0,
+ fps=0.,
+ ids2names=[]):
+ im = np.ascontiguousarray(np.copy(image))
+ im_h, im_w = im.shape[:2]
+
+ top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255
+
+ text_scale = max(1, image.shape[1] / 1600.)
+ text_thickness = 2
+ line_thickness = max(1, int(image.shape[1] / 500.))
+
+ radius = max(5, int(im_w / 140.))
+
+ for cls_id in range(num_classes):
+ tlwhs = tlwhs_dict[cls_id]
+ obj_ids = obj_ids_dict[cls_id]
+ scores = scores_dict[cls_id]
+ cv2.putText(
+ im,
+ 'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),
+ (0, int(15 * text_scale)),
+ cv2.FONT_HERSHEY_PLAIN,
+ text_scale, (0, 0, 255),
+ thickness=2)
+
+ for i, tlwh in enumerate(tlwhs):
+ x1, y1, w, h = tlwh
+ intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
+ obj_id = int(obj_ids[i])
+
+ id_text = '{}'.format(int(obj_id))
+ if ids2names != []:
+ id_text = '{}_{}'.format(ids2names[cls_id], id_text)
+ else:
+ id_text = 'class{}_{}'.format(cls_id, id_text)
+
+ _line_thickness = 1 if obj_id <= 0 else line_thickness
+ color = get_color(abs(obj_id))
+ cv2.rectangle(
+ im,
+ intbox[0:2],
+ intbox[2:4],
+ color=color,
+ thickness=line_thickness)
+ cv2.putText(
+ im,
+ id_text, (intbox[0], intbox[1] - 10),
+ cv2.FONT_HERSHEY_PLAIN,
+ text_scale, (0, 0, 255),
+ thickness=text_thickness)
+
+ if scores is not None:
+ text = '{:.2f}'.format(float(scores[i]))
+ cv2.putText(
+ im,
+ text, (intbox[0], intbox[1] + 10),
+ cv2.FONT_HERSHEY_PLAIN,
+ text_scale, (0, 255, 255),
+ thickness=text_thickness)
+ return im
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__init__.py
new file mode 100644
index 000000000..d66697caf
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__init__.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import fpn
+from . import yolo_fpn
+from . import hrfpn
+from . import ttf_fpn
+from . import centernet_fpn
+from . import bifpn
+from . import csp_pan
+
+from .fpn import *
+from .yolo_fpn import *
+from .hrfpn import *
+from .ttf_fpn import *
+from .centernet_fpn import *
+from .blazeface_fpn import *
+from .bifpn import *
+from .csp_pan import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..922d2c1d6
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/bifpn.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/bifpn.cpython-37.pyc
new file mode 100644
index 000000000..57af360d2
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/bifpn.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/blazeface_fpn.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/blazeface_fpn.cpython-37.pyc
new file mode 100644
index 000000000..256668459
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/blazeface_fpn.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/centernet_fpn.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/centernet_fpn.cpython-37.pyc
new file mode 100644
index 000000000..a1c91e7a2
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/centernet_fpn.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/csp_pan.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/csp_pan.cpython-37.pyc
new file mode 100644
index 000000000..0b14b978b
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/csp_pan.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/fpn.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/fpn.cpython-37.pyc
new file mode 100644
index 000000000..d8b5164d3
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/fpn.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/hrfpn.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/hrfpn.cpython-37.pyc
new file mode 100644
index 000000000..a9738dac7
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/hrfpn.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/ttf_fpn.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/ttf_fpn.cpython-37.pyc
new file mode 100644
index 000000000..f36ec7708
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/ttf_fpn.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/yolo_fpn.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/yolo_fpn.cpython-37.pyc
new file mode 100644
index 000000000..59830319f
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/__pycache__/yolo_fpn.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/bifpn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/bifpn.py
new file mode 100644
index 000000000..c60760893
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/bifpn.py
@@ -0,0 +1,302 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import Constant
+
+from ppdet.core.workspace import register, serializable
+from ppdet.modeling.layers import ConvNormLayer
+from ..shape_spec import ShapeSpec
+
+__all__ = ['BiFPN']
+
+
+class SeparableConvLayer(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels=None,
+ kernel_size=3,
+ norm_type='bn',
+ norm_groups=32,
+ act='swish'):
+ super(SeparableConvLayer, self).__init__()
+ assert norm_type in ['bn', 'sync_bn', 'gn', None]
+ assert act in ['swish', 'relu', None]
+
+        self.in_channels = in_channels
+        self.out_channels = out_channels if out_channels is not None else in_channels
+ self.norm_type = norm_type
+ self.norm_groups = norm_groups
+ self.depthwise_conv = nn.Conv2D(
+ in_channels,
+ in_channels,
+ kernel_size,
+ padding=kernel_size // 2,
+ groups=in_channels,
+ bias_attr=False)
+ self.pointwise_conv = nn.Conv2D(in_channels, self.out_channels, 1)
+
+ # norm type
+ if self.norm_type == 'bn':
+ self.norm = nn.BatchNorm2D(self.out_channels)
+ elif self.norm_type == 'sync_bn':
+ self.norm = nn.SyncBatchNorm(self.out_channels)
+ elif self.norm_type == 'gn':
+ self.norm = nn.GroupNorm(
+ num_groups=self.norm_groups, num_channels=self.out_channels)
+
+        # activation
+        if act == 'swish':
+            self.act = nn.Swish()
+        elif act == 'relu':
+            self.act = nn.ReLU()
+        else:
+            self.act = None
+
+ def forward(self, x):
+ if self.act is not None:
+ x = self.act(x)
+ out = self.depthwise_conv(x)
+ out = self.pointwise_conv(out)
+ if self.norm_type is not None:
+ out = self.norm(out)
+ return out
+
+
+class BiFPNCell(nn.Layer):
+ def __init__(self,
+ channels=256,
+ num_levels=5,
+ eps=1e-5,
+ use_weighted_fusion=True,
+ kernel_size=3,
+ norm_type='bn',
+ norm_groups=32,
+ act='swish'):
+ super(BiFPNCell, self).__init__()
+ self.channels = channels
+ self.num_levels = num_levels
+ self.eps = eps
+ self.use_weighted_fusion = use_weighted_fusion
+
+ # up
+ self.conv_up = nn.LayerList([
+ SeparableConvLayer(
+ self.channels,
+ kernel_size=kernel_size,
+ norm_type=norm_type,
+ norm_groups=norm_groups,
+ act=act) for _ in range(self.num_levels - 1)
+ ])
+ # down
+ self.conv_down = nn.LayerList([
+ SeparableConvLayer(
+ self.channels,
+ kernel_size=kernel_size,
+ norm_type=norm_type,
+ norm_groups=norm_groups,
+ act=act) for _ in range(self.num_levels - 1)
+ ])
+
+ if self.use_weighted_fusion:
+ self.up_weights = self.create_parameter(
+ shape=[self.num_levels - 1, 2],
+ attr=ParamAttr(initializer=Constant(1.)))
+ self.down_weights = self.create_parameter(
+ shape=[self.num_levels - 1, 3],
+ attr=ParamAttr(initializer=Constant(1.)))
+
+ def _feature_fusion_cell(self,
+ conv_layer,
+ lateral_feat,
+ sampling_feat,
+ route_feat=None,
+ weights=None):
+ if self.use_weighted_fusion:
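+            # Fast normalized fusion (as in EfficientDet): scale each input by
+            # ReLU(w_i) / (sum_j ReLU(w_j) + eps) before the fused conv.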
+ weights = F.relu(weights)
+ weights = weights / (weights.sum() + self.eps)
+ if route_feat is not None:
+ out_feat = weights[0] * lateral_feat + \
+ weights[1] * sampling_feat + \
+ weights[2] * route_feat
+ else:
+ out_feat = weights[0] * lateral_feat + \
+ weights[1] * sampling_feat
+ else:
+ if route_feat is not None:
+ out_feat = lateral_feat + sampling_feat + route_feat
+ else:
+ out_feat = lateral_feat + sampling_feat
+
+ out_feat = conv_layer(out_feat)
+ return out_feat
+
+ def forward(self, feats):
+ # feats: [P3 - P7]
+ lateral_feats = []
+
+ # up
+ up_feature = feats[-1]
+ for i, feature in enumerate(feats[::-1]):
+ if i == 0:
+ lateral_feats.append(feature)
+ else:
+ shape = paddle.shape(feature)
+ up_feature = F.interpolate(
+ up_feature, size=[shape[2], shape[3]])
+ lateral_feature = self._feature_fusion_cell(
+ self.conv_up[i - 1],
+ feature,
+ up_feature,
+ weights=self.up_weights[i - 1]
+ if self.use_weighted_fusion else None)
+ lateral_feats.append(lateral_feature)
+ up_feature = lateral_feature
+
+ out_feats = []
+ # down
+ down_feature = lateral_feats[-1]
+ for i, (lateral_feature,
+ route_feature) in enumerate(zip(lateral_feats[::-1], feats)):
+ if i == 0:
+ out_feats.append(lateral_feature)
+ else:
+ down_feature = F.max_pool2d(down_feature, 3, 2, 1)
+ if i == len(feats) - 1:
+ route_feature = None
+ weights = self.down_weights[
+ i - 1][:2] if self.use_weighted_fusion else None
+ else:
+ weights = self.down_weights[
+ i - 1] if self.use_weighted_fusion else None
+ out_feature = self._feature_fusion_cell(
+ self.conv_down[i - 1],
+ lateral_feature,
+ down_feature,
+ route_feature,
+ weights=weights)
+ out_feats.append(out_feature)
+ down_feature = out_feature
+
+ return out_feats
+
+
+@register
+@serializable
+class BiFPN(nn.Layer):
+ """
+ Bidirectional Feature Pyramid Network, see https://arxiv.org/abs/1911.09070
+
+ Args:
+ in_channels (list[int]): input channels of each level which can be
+ derived from the output shape of backbone by from_config.
+ out_channel (int): output channel of each level.
+ num_extra_levels (int): the number of extra stages added to the last level.
+ default: 2
+ fpn_strides (List): The stride of each level.
+ num_stacks (int): the number of stacks for BiFPN, default: 1.
+ use_weighted_fusion (bool): use weighted feature fusion in BiFPN, default: True.
+        norm_type (string|None): the normalization type in the BiFPN module. If
+            norm_type is None, no norm is applied after conv; otherwise one of
+            'bn', 'gn' or 'sync_bn' can be used. default: bn.
+        norm_groups (int): the number of groups, only used when norm_type is 'gn'.
+ act (string|None): the activation function of BiFPN.
+ """
+
+ def __init__(self,
+ in_channels=(512, 1024, 2048),
+ out_channel=256,
+ num_extra_levels=2,
+ fpn_strides=[8, 16, 32, 64, 128],
+ num_stacks=1,
+ use_weighted_fusion=True,
+ norm_type='bn',
+ norm_groups=32,
+ act='swish'):
+ super(BiFPN, self).__init__()
+ assert num_stacks > 0, "The number of stacks of BiFPN is at least 1."
+ assert norm_type in ['bn', 'sync_bn', 'gn', None]
+ assert act in ['swish', 'relu', None]
+ assert num_extra_levels >= 0, \
+ "The `num_extra_levels` must be non negative(>=0)."
+
+ self.in_channels = in_channels
+ self.out_channel = out_channel
+ self.num_extra_levels = num_extra_levels
+ self.num_stacks = num_stacks
+ self.use_weighted_fusion = use_weighted_fusion
+ self.norm_type = norm_type
+ self.norm_groups = norm_groups
+ self.act = act
+ self.num_levels = len(self.in_channels) + self.num_extra_levels
+ if len(fpn_strides) != self.num_levels:
+ for i in range(self.num_extra_levels):
+ fpn_strides += [fpn_strides[-1] * 2]
+ self.fpn_strides = fpn_strides
+
+ self.lateral_convs = nn.LayerList()
+ for in_c in in_channels:
+ self.lateral_convs.append(
+ ConvNormLayer(in_c, self.out_channel, 1, 1))
+ if self.num_extra_levels > 0:
+ self.extra_convs = nn.LayerList()
+ for i in range(self.num_extra_levels):
+ if i == 0:
+ self.extra_convs.append(
+ ConvNormLayer(self.in_channels[-1], self.out_channel, 3,
+ 2))
+ else:
+ self.extra_convs.append(nn.MaxPool2D(3, 2, 1))
+
+ self.bifpn_cells = nn.LayerList()
+ for i in range(self.num_stacks):
+ self.bifpn_cells.append(
+ BiFPNCell(
+ self.out_channel,
+ self.num_levels,
+ use_weighted_fusion=self.use_weighted_fusion,
+ norm_type=self.norm_type,
+ norm_groups=self.norm_groups,
+ act=self.act))
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {
+ 'in_channels': [i.channels for i in input_shape],
+ 'fpn_strides': [i.stride for i in input_shape]
+ }
+
+ @property
+ def out_shape(self):
+ return [
+ ShapeSpec(
+ channels=self.out_channel, stride=s) for s in self.fpn_strides
+ ]
+
+ def forward(self, feats):
+ assert len(feats) == len(self.in_channels)
+ fpn_feats = []
+ for conv_layer, feature in zip(self.lateral_convs, feats):
+ fpn_feats.append(conv_layer(feature))
+ if self.num_extra_levels > 0:
+ feat = feats[-1]
+ for conv_layer in self.extra_convs:
+ feat = conv_layer(feat)
+ fpn_feats.append(feat)
+
+ for bifpn_cell in self.bifpn_cells:
+ fpn_feats = bifpn_cell(fpn_feats)
+ return fpn_feats
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/blazeface_fpn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/blazeface_fpn.py
new file mode 100644
index 000000000..18d7f3cf1
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/blazeface_fpn.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn.functional as F
+from paddle import ParamAttr
+import paddle.nn as nn
+from paddle.nn.initializer import KaimingNormal
+from ppdet.core.workspace import register, serializable
+from ..shape_spec import ShapeSpec
+
+__all__ = ['BlazeNeck']
+
+
+def hard_swish(x):
+ return x * F.relu6(x + 3) / 6.
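+
+# e.g. hard_swish(3.) == 3.0 and hard_swish(-3.) == 0.0, since relu6 clamps
+# its input to [0, 6].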
+
+
+class ConvBNLayer(nn.Layer):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ padding,
+ num_groups=1,
+ act='relu',
+ conv_lr=0.1,
+ conv_decay=0.,
+ norm_decay=0.,
+ norm_type='bn',
+ name=None):
+ super(ConvBNLayer, self).__init__()
+ self.act = act
+ self._conv = nn.Conv2D(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ groups=num_groups,
+ weight_attr=ParamAttr(
+ learning_rate=conv_lr, initializer=KaimingNormal()),
+ bias_attr=False)
+
+ if norm_type == 'sync_bn':
+ self._batch_norm = nn.SyncBatchNorm(out_channels)
+ else:
+ self._batch_norm = nn.BatchNorm(
+ out_channels, act=None, use_global_stats=False)
+
+ def forward(self, x):
+ x = self._conv(x)
+ x = self._batch_norm(x)
+ if self.act == "relu":
+ x = F.relu(x)
+ elif self.act == "relu6":
+ x = F.relu6(x)
+ elif self.act == 'leaky':
+ x = F.leaky_relu(x)
+ elif self.act == 'hard_swish':
+ x = hard_swish(x)
+ return x
+
+
+class FPN(nn.Layer):
+ def __init__(self, in_channels, out_channels, name=None):
+ super(FPN, self).__init__()
+ self.conv1_fpn = ConvBNLayer(
+ in_channels,
+ out_channels // 2,
+ kernel_size=1,
+ padding=0,
+ stride=1,
+ act='leaky',
+ name=name + '_output1')
+ self.conv2_fpn = ConvBNLayer(
+ in_channels,
+ out_channels // 2,
+ kernel_size=1,
+ padding=0,
+ stride=1,
+ act='leaky',
+ name=name + '_output2')
+ self.conv3_fpn = ConvBNLayer(
+ out_channels // 2,
+ out_channels // 2,
+ kernel_size=3,
+ padding=1,
+ stride=1,
+ act='leaky',
+ name=name + '_merge')
+
+ def forward(self, input):
+ output1 = self.conv1_fpn(input[0])
+ output2 = self.conv2_fpn(input[1])
+ up2 = F.upsample(
+ output2, size=paddle.shape(output1)[-2:], mode='nearest')
+ output1 = paddle.add(output1, up2)
+ output1 = self.conv3_fpn(output1)
+ return output1, output2
+
+
+class SSH(nn.Layer):
+ def __init__(self, in_channels, out_channels, name=None):
+ super(SSH, self).__init__()
+ assert out_channels % 4 == 0
+ self.conv0_ssh = ConvBNLayer(
+ in_channels,
+ out_channels // 2,
+ kernel_size=3,
+ padding=1,
+ stride=1,
+ act=None,
+ name=name + 'ssh_conv3')
+ self.conv1_ssh = ConvBNLayer(
+ out_channels // 2,
+ out_channels // 4,
+ kernel_size=3,
+ padding=1,
+ stride=1,
+ act='leaky',
+ name=name + 'ssh_conv5_1')
+ self.conv2_ssh = ConvBNLayer(
+ out_channels // 4,
+ out_channels // 4,
+ kernel_size=3,
+ padding=1,
+ stride=1,
+ act=None,
+ name=name + 'ssh_conv5_2')
+ self.conv3_ssh = ConvBNLayer(
+ out_channels // 4,
+ out_channels // 4,
+ kernel_size=3,
+ padding=1,
+ stride=1,
+ act='leaky',
+ name=name + 'ssh_conv7_1')
+ self.conv4_ssh = ConvBNLayer(
+ out_channels // 4,
+ out_channels // 4,
+ kernel_size=3,
+ padding=1,
+ stride=1,
+ act=None,
+ name=name + 'ssh_conv7_2')
+
+ def forward(self, x):
+ conv0 = self.conv0_ssh(x)
+ conv1 = self.conv1_ssh(conv0)
+ conv2 = self.conv2_ssh(conv1)
+ conv3 = self.conv3_ssh(conv2)
+ conv4 = self.conv4_ssh(conv3)
+ concat = paddle.concat([conv0, conv2, conv4], axis=1)
+ return F.relu(concat)
+
+
+@register
+@serializable
+class BlazeNeck(nn.Layer):
+ def __init__(self, in_channel, neck_type="None", data_format='NCHW'):
+ super(BlazeNeck, self).__init__()
+ self.neck_type = neck_type
+        self.return_input = False
+        self._out_channels = in_channel
+        if self.neck_type == 'None':
+            self.return_input = True
+ if "fpn" in self.neck_type:
+ self.fpn = FPN(self._out_channels[0],
+ self._out_channels[1],
+ name='fpn')
+ self._out_channels = [
+ self._out_channels[0] // 2, self._out_channels[1] // 2
+ ]
+ if "ssh" in self.neck_type:
+ self.ssh1 = SSH(self._out_channels[0],
+ self._out_channels[0],
+ name='ssh1')
+ self.ssh2 = SSH(self._out_channels[1],
+ self._out_channels[1],
+ name='ssh2')
+ self._out_channels = [self._out_channels[0], self._out_channels[1]]
+
+ def forward(self, inputs):
+        if self.return_input:
+ return inputs
+ output1, output2 = None, None
+ if "fpn" in self.neck_type:
+ backout_4, backout_1 = inputs
+ output1, output2 = self.fpn([backout_4, backout_1])
+ if self.neck_type == "only_fpn":
+ return [output1, output2]
+ if self.neck_type == "only_ssh":
+ output1, output2 = inputs
+ feature1 = self.ssh1(output1)
+ feature2 = self.ssh2(output2)
+ return [feature1, feature2]
+
+ @property
+ def out_shape(self):
+ return [
+ ShapeSpec(channels=c)
+ for c in [self._out_channels[0], self._out_channels[1]]
+ ]
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/centernet_fpn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/centernet_fpn.py
new file mode 100644
index 000000000..df5ced2e7
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/centernet_fpn.py
@@ -0,0 +1,420 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import math
+import paddle
+import paddle.nn as nn
+from paddle import ParamAttr
+from paddle.nn.initializer import Uniform
+import paddle.nn.functional as F
+from ppdet.core.workspace import register, serializable
+from ppdet.modeling.layers import ConvNormLayer
+from ppdet.modeling.backbones.hardnet import ConvLayer, HarDBlock
+from ..shape_spec import ShapeSpec
+
+__all__ = ['CenterNetDLAFPN', 'CenterNetHarDNetFPN']
+
+
+# SGE attention
+class BasicConv(nn.Layer):
+ def __init__(self,
+ in_planes,
+ out_planes,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ relu=True,
+ bn=True,
+ bias_attr=False):
+ super(BasicConv, self).__init__()
+ self.out_channels = out_planes
+ self.conv = nn.Conv2D(
+ in_planes,
+ out_planes,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups,
+ bias_attr=bias_attr)
+ self.bn = nn.BatchNorm2D(
+ out_planes,
+ epsilon=1e-5,
+ momentum=0.01,
+ weight_attr=False,
+ bias_attr=False) if bn else None
+ self.relu = nn.ReLU() if relu else None
+
+ def forward(self, x):
+ x = self.conv(x)
+ if self.bn is not None:
+ x = self.bn(x)
+ if self.relu is not None:
+ x = self.relu(x)
+ return x
+
+
+class ChannelPool(nn.Layer):
+ def forward(self, x):
+ return paddle.concat(
+ (paddle.max(x, 1).unsqueeze(1), paddle.mean(x, 1).unsqueeze(1)),
+ axis=1)
+
+
+class SpatialGate(nn.Layer):
+ def __init__(self):
+ super(SpatialGate, self).__init__()
+ kernel_size = 7
+ self.compress = ChannelPool()
+ self.spatial = BasicConv(
+ 2,
+ 1,
+ kernel_size,
+ stride=1,
+ padding=(kernel_size - 1) // 2,
+ relu=False)
+
+ def forward(self, x):
+ x_compress = self.compress(x)
+ x_out = self.spatial(x_compress)
+ scale = F.sigmoid(x_out) # broadcasting
+ return x * scale
+
+
+def fill_up_weights(up):
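+    # Initializes the transposed conv kernel as a bilinear upsampling filter;
+    # every output channel shares the same 2-D bilinear weights.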
+ weight = up.weight.numpy()
+ f = math.ceil(weight.shape[2] / 2)
+ c = (2 * f - 1 - f % 2) / (2. * f)
+ for i in range(weight.shape[2]):
+ for j in range(weight.shape[3]):
+ weight[0, 0, i, j] = \
+ (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
+ for c in range(1, weight.shape[0]):
+ weight[c, 0, :, :] = weight[0, 0, :, :]
+ up.weight.set_value(weight)
+
+
+class IDAUp(nn.Layer):
+ def __init__(self, ch_ins, ch_out, up_strides, dcn_v2=True):
+ super(IDAUp, self).__init__()
+ for i in range(1, len(ch_ins)):
+ ch_in = ch_ins[i]
+ up_s = int(up_strides[i])
+ fan_in = ch_in * 3 * 3
+ stdv = 1. / math.sqrt(fan_in)
+ proj = nn.Sequential(
+ ConvNormLayer(
+ ch_in,
+ ch_out,
+ filter_size=3,
+ stride=1,
+ use_dcn=dcn_v2,
+ bias_on=dcn_v2,
+ norm_decay=None,
+ dcn_lr_scale=1.,
+ dcn_regularizer=None,
+ initializer=Uniform(-stdv, stdv)),
+ nn.ReLU())
+ node = nn.Sequential(
+ ConvNormLayer(
+ ch_out,
+ ch_out,
+ filter_size=3,
+ stride=1,
+ use_dcn=dcn_v2,
+ bias_on=dcn_v2,
+ norm_decay=None,
+ dcn_lr_scale=1.,
+ dcn_regularizer=None,
+ initializer=Uniform(-stdv, stdv)),
+ nn.ReLU())
+
+ kernel_size = up_s * 2
+ fan_in = ch_out * kernel_size * kernel_size
+ stdv = 1. / math.sqrt(fan_in)
+ up = nn.Conv2DTranspose(
+ ch_out,
+ ch_out,
+ kernel_size=up_s * 2,
+ stride=up_s,
+ padding=up_s // 2,
+ groups=ch_out,
+ weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
+ bias_attr=False)
+ fill_up_weights(up)
+ setattr(self, 'proj_' + str(i), proj)
+ setattr(self, 'up_' + str(i), up)
+ setattr(self, 'node_' + str(i), node)
+
+ def forward(self, inputs, start_level, end_level):
+ for i in range(start_level + 1, end_level):
+ upsample = getattr(self, 'up_' + str(i - start_level))
+ project = getattr(self, 'proj_' + str(i - start_level))
+
+ inputs[i] = project(inputs[i])
+ inputs[i] = upsample(inputs[i])
+ node = getattr(self, 'node_' + str(i - start_level))
+ inputs[i] = node(paddle.add(inputs[i], inputs[i - 1]))
+
+
+class DLAUp(nn.Layer):
+ def __init__(self, start_level, channels, scales, ch_in=None, dcn_v2=True):
+ super(DLAUp, self).__init__()
+ self.start_level = start_level
+ if ch_in is None:
+ ch_in = channels
+ self.channels = channels
+ channels = list(channels)
+ scales = np.array(scales, dtype=int)
+ for i in range(len(channels) - 1):
+ j = -i - 2
+ setattr(
+ self,
+ 'ida_{}'.format(i),
+ IDAUp(
+ ch_in[j:],
+ channels[j],
+ scales[j:] // scales[j],
+ dcn_v2=dcn_v2))
+ scales[j + 1:] = scales[j]
+ ch_in[j + 1:] = [channels[j] for _ in channels[j + 1:]]
+
+ def forward(self, inputs):
+        out = [inputs[-1]]  # start from the deepest (stride-32) level
+ for i in range(len(inputs) - self.start_level - 1):
+ ida = getattr(self, 'ida_{}'.format(i))
+ ida(inputs, len(inputs) - i - 2, len(inputs))
+ out.insert(0, inputs[-1])
+ return out
+
+
+@register
+@serializable
+class CenterNetDLAFPN(nn.Layer):
+ """
+ Args:
+ in_channels (list): number of input feature channels from backbone.
+            [16, 32, 64, 128, 256, 512] by default, meaning the channels of DLA-34
+        down_ratio (int): the down ratio from images to heatmap, 4 by default
+        last_level (int): the last level of input feature fed into the upsampling block
+        out_channel (int): the channel of the output feature, 0 by default means
+            the channel of the input feature whose down ratio is `down_ratio`
+        first_level (int|None): the first level of input feature fed into the
+            upsampling block. if None, the first level is log2(down_ratio)
+        dcn_v2 (bool): whether to use DCNv2, True by default
+        with_sge (bool): whether to use SGE attention, False by default
+ """
+
+ def __init__(self,
+ in_channels,
+ down_ratio=4,
+ last_level=5,
+ out_channel=0,
+ first_level=None,
+ dcn_v2=True,
+ with_sge=False):
+ super(CenterNetDLAFPN, self).__init__()
+ self.first_level = int(np.log2(
+ down_ratio)) if first_level is None else first_level
+        assert self.first_level >= 0, "first level in CenterNetDLAFPN should be greater than or equal to 0, but received {}".format(
+            self.first_level)
+ self.down_ratio = down_ratio
+ self.last_level = last_level
+ scales = [2**i for i in range(len(in_channels[self.first_level:]))]
+ self.dla_up = DLAUp(
+ self.first_level,
+ in_channels[self.first_level:],
+ scales,
+ dcn_v2=dcn_v2)
+ self.out_channel = out_channel
+ if out_channel == 0:
+ self.out_channel = in_channels[self.first_level]
+ self.ida_up = IDAUp(
+ in_channels[self.first_level:self.last_level],
+ self.out_channel,
+ [2**i for i in range(self.last_level - self.first_level)],
+ dcn_v2=dcn_v2)
+
+ self.with_sge = with_sge
+ if self.with_sge:
+ self.sge_attention = SpatialGate()
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape]}
+
+ def forward(self, body_feats):
+
+ dla_up_feats = self.dla_up(body_feats)
+
+ ida_up_feats = []
+ for i in range(self.last_level - self.first_level):
+ ida_up_feats.append(dla_up_feats[i].clone())
+
+ self.ida_up(ida_up_feats, 0, len(ida_up_feats))
+
+ feat = ida_up_feats[-1]
+ if self.with_sge:
+ feat = self.sge_attention(feat)
+ if self.down_ratio != 4:
+            feat = F.interpolate(
+                feat,
+                scale_factor=self.down_ratio // 4,
+                mode="bilinear",
+                align_corners=True)
+ return feat
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=self.out_channel, stride=self.down_ratio)]
+
+
+class TransitionUp(nn.Layer):
+ def __init__(self, in_channels, out_channels):
+ super().__init__()
+
+ def forward(self, x, skip):
+        h, w = skip.shape[2], skip.shape[3]
+        out = F.interpolate(
+            x, size=(h, w), mode="bilinear", align_corners=True)
+ out = paddle.concat([out, skip], 1)
+ return out
+
+
+@register
+@serializable
+class CenterNetHarDNetFPN(nn.Layer):
+ """
+ Args:
+ in_channels (list): number of input feature channels from backbone.
+            [96, 214, 458, 784] by default, meaning the channels of HarDNet-85
+        num_layers (int): HarDNet layers, 85 by default
+        down_ratio (int): the down ratio from images to heatmap, 4 by default
+        first_level (int|None): the first level of input feature fed into the
+            upsampling block. if None, the first level is log2(down_ratio) - 1
+        last_level (int): the last level of input feature fed into the upsampling block
+        out_channel (int): the channel of the output feature, 0 by default means
+            the channel of the input feature whose down ratio is `down_ratio`
+ """
+
+ def __init__(self,
+ in_channels,
+ num_layers=85,
+ down_ratio=4,
+ first_level=None,
+ last_level=4,
+ out_channel=0):
+ super(CenterNetHarDNetFPN, self).__init__()
+ self.first_level = int(np.log2(
+ down_ratio)) - 1 if first_level is None else first_level
+        assert self.first_level >= 0, "first level in CenterNetHarDNetFPN should be greater than or equal to 0, but received {}".format(
+            self.first_level)
+ self.down_ratio = down_ratio
+ self.last_level = last_level
+ self.last_pool = nn.AvgPool2D(kernel_size=2, stride=2)
+
+        assert num_layers in [68, 85], "HarDNet-{} is not supported.".format(
+            num_layers)
+ if num_layers == 85:
+ self.last_proj = ConvLayer(784, 256, kernel_size=1)
+ self.last_blk = HarDBlock(768, 80, 1.7, 8)
+ self.skip_nodes = [1, 3, 8, 13]
+ self.SC = [32, 32, 0]
+ gr = [64, 48, 28]
+ layers = [8, 8, 4]
+ ch_list2 = [224 + self.SC[0], 160 + self.SC[1], 96 + self.SC[2]]
+ channels = [96, 214, 458, 784]
+ self.skip_lv = 3
+
+ elif num_layers == 68:
+ self.last_proj = ConvLayer(654, 192, kernel_size=1)
+ self.last_blk = HarDBlock(576, 72, 1.7, 8)
+ self.skip_nodes = [1, 3, 8, 11]
+ self.SC = [32, 32, 0]
+ gr = [48, 32, 20]
+ layers = [8, 8, 4]
+ ch_list2 = [224 + self.SC[0], 96 + self.SC[1], 64 + self.SC[2]]
+ channels = [64, 124, 328, 654]
+ self.skip_lv = 2
+
+ self.transUpBlocks = nn.LayerList([])
+ self.denseBlocksUp = nn.LayerList([])
+ self.conv1x1_up = nn.LayerList([])
+ self.avg9x9 = nn.AvgPool2D(kernel_size=(9, 9), stride=1, padding=(4, 4))
+ prev_ch = self.last_blk.get_out_ch()
+
+ for i in range(3):
+ skip_ch = channels[3 - i]
+ self.transUpBlocks.append(TransitionUp(prev_ch, prev_ch))
+ if i < self.skip_lv:
+ cur_ch = prev_ch + skip_ch
+ else:
+ cur_ch = prev_ch
+ self.conv1x1_up.append(
+ ConvLayer(
+ cur_ch, ch_list2[i], kernel_size=1))
+ cur_ch = ch_list2[i]
+ cur_ch -= self.SC[i]
+ cur_ch *= 3
+
+ blk = HarDBlock(cur_ch, gr[i], 1.7, layers[i])
+ self.denseBlocksUp.append(blk)
+ prev_ch = blk.get_out_ch()
+
+ prev_ch += self.SC[0] + self.SC[1] + self.SC[2]
+ self.out_channel = prev_ch
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape]}
+
+ def forward(self, body_feats):
+ x = body_feats[-1]
+ x_sc = []
+ x = self.last_proj(x)
+ x = self.last_pool(x)
+ x2 = self.avg9x9(x)
+ x3 = x / (x.sum((2, 3), keepdim=True) + 0.1)
+ x = paddle.concat([x, x2, x3], 1)
+ x = self.last_blk(x)
+
+ for i in range(3):
+ skip_x = body_feats[3 - i]
+ x_up = self.transUpBlocks[i](x, skip_x)
+ x_ch = self.conv1x1_up[i](x_up)
+ if self.SC[i] > 0:
+ end = x_ch.shape[1]
+ new_st = end - self.SC[i]
+ x_sc.append(x_ch[:, new_st:, :, :])
+ x_ch = x_ch[:, :new_st, :, :]
+ x2 = self.avg9x9(x_ch)
+ x3 = x_ch / (x_ch.sum((2, 3), keepdim=True) + 0.1)
+ x_new = paddle.concat([x_ch, x2, x3], 1)
+ x = self.denseBlocksUp[i](x_new)
+
+ scs = [x]
+ for i in range(3):
+ if self.SC[i] > 0:
+ scs.insert(
+ 0,
+ F.interpolate(
+ x_sc[i],
+ size=(x.shape[2], x.shape[3]),
+ mode="bilinear",
+ align_corners=True))
+ neck_feat = paddle.concat(scs, 1)
+ return neck_feat
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=self.out_channel, stride=self.down_ratio)]
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/csp_pan.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/csp_pan.py
new file mode 100644
index 000000000..7417c46ab
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/csp_pan.py
@@ -0,0 +1,364 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The code is based on:
+# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/yolox_pafpn.py
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.regularizer import L2Decay
+from ppdet.core.workspace import register, serializable
+from ..shape_spec import ShapeSpec
+
+__all__ = ['CSPPAN']
+
+
+class ConvBNLayer(nn.Layer):
+ def __init__(self,
+ in_channel=96,
+ out_channel=96,
+ kernel_size=3,
+ stride=1,
+ groups=1,
+ act='leaky_relu'):
+ super(ConvBNLayer, self).__init__()
+ initializer = nn.initializer.KaimingUniform()
+ self.act = act
+ assert self.act in ['leaky_relu', "hard_swish"]
+ self.conv = nn.Conv2D(
+ in_channels=in_channel,
+ out_channels=out_channel,
+ kernel_size=kernel_size,
+ groups=groups,
+ padding=(kernel_size - 1) // 2,
+ stride=stride,
+ weight_attr=ParamAttr(initializer=initializer),
+ bias_attr=False)
+ self.bn = nn.BatchNorm2D(out_channel)
+
+ def forward(self, x):
+ x = self.bn(self.conv(x))
+ if self.act == "leaky_relu":
+ x = F.leaky_relu(x)
+ elif self.act == "hard_swish":
+ x = F.hardswish(x)
+ return x
+
+
+class DPModule(nn.Layer):
+ """
+ Depth-wise and point-wise module.
+ Args:
+ in_channel (int): The input channels of this Module.
+ out_channel (int): The output channels of this Module.
+ kernel_size (int): The conv2d kernel size of this Module.
+ stride (int): The conv2d's stride of this Module.
+ act (str): The activation function of this Module,
+ Now support `leaky_relu` and `hard_swish`.
+ """
+
+ def __init__(self,
+ in_channel=96,
+ out_channel=96,
+ kernel_size=3,
+ stride=1,
+ act='leaky_relu'):
+ super(DPModule, self).__init__()
+ initializer = nn.initializer.KaimingUniform()
+ self.act = act
+ self.dwconv = nn.Conv2D(
+ in_channels=in_channel,
+ out_channels=out_channel,
+ kernel_size=kernel_size,
+ groups=out_channel,
+ padding=(kernel_size - 1) // 2,
+ stride=stride,
+ weight_attr=ParamAttr(initializer=initializer),
+ bias_attr=False)
+ self.bn1 = nn.BatchNorm2D(out_channel)
+ self.pwconv = nn.Conv2D(
+ in_channels=out_channel,
+ out_channels=out_channel,
+ kernel_size=1,
+ groups=1,
+ padding=0,
+ weight_attr=ParamAttr(initializer=initializer),
+ bias_attr=False)
+ self.bn2 = nn.BatchNorm2D(out_channel)
+
+ def act_func(self, x):
+ if self.act == "leaky_relu":
+ x = F.leaky_relu(x)
+ elif self.act == "hard_swish":
+ x = F.hardswish(x)
+ return x
+
+ def forward(self, x):
+ x = self.act_func(self.bn1(self.dwconv(x)))
+ x = self.act_func(self.bn2(self.pwconv(x)))
+ return x
+
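+
+# Editor's sketch, not part of the original file: DPModule is a depthwise conv
+# followed by a pointwise conv, each with BN and activation. The
+# `_demo_dp_module` name is hypothetical.
+def _demo_dp_module():  # pragma: no cover - illustrative only
+    dp = DPModule(in_channel=96, out_channel=96, kernel_size=5, stride=2)
+    y = dp(paddle.rand([2, 96, 32, 32]))
+    # the stride-2 depthwise conv halves the spatial size
+    assert y.shape == [2, 96, 16, 16]
+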
+
+class DarknetBottleneck(nn.Layer):
+ """The basic bottleneck block used in Darknet.
+
+ Each Block consists of two ConvModules and the input is added to the
+ final output. Each ConvModule is composed of Conv, BN, and act.
+ The first convLayer has filter size of 1x1 and the second one has the
+ filter size of 3x3.
+
+ Args:
+ in_channels (int): The input channels of this Module.
+ out_channels (int): The output channels of this Module.
+        expansion (float): the ratio of hidden channels to out_channels.
+            Default: 0.5
+ add_identity (bool): Whether to add identity to the out.
+ Default: True
+ use_depthwise (bool): Whether to use depthwise separable convolution.
+ Default: False
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ expansion=0.5,
+ add_identity=True,
+ use_depthwise=False,
+ act="leaky_relu"):
+ super(DarknetBottleneck, self).__init__()
+ hidden_channels = int(out_channels * expansion)
+ conv_func = DPModule if use_depthwise else ConvBNLayer
+ self.conv1 = ConvBNLayer(
+ in_channel=in_channels,
+ out_channel=hidden_channels,
+ kernel_size=1,
+ act=act)
+ self.conv2 = conv_func(
+ in_channel=hidden_channels,
+ out_channel=out_channels,
+ kernel_size=kernel_size,
+ stride=1,
+ act=act)
+ self.add_identity = \
+ add_identity and in_channels == out_channels
+
+ def forward(self, x):
+ identity = x
+ out = self.conv1(x)
+ out = self.conv2(out)
+
+ if self.add_identity:
+ return out + identity
+ else:
+ return out
+
+
+class CSPLayer(nn.Layer):
+ """Cross Stage Partial Layer.
+
+ Args:
+ in_channels (int): The input channels of the CSP layer.
+ out_channels (int): The output channels of the CSP layer.
+ expand_ratio (float): Ratio to adjust the number of channels of the
+ hidden layer. Default: 0.5
+ num_blocks (int): Number of blocks. Default: 1
+ add_identity (bool): Whether to add identity in blocks.
+ Default: True
+        use_depthwise (bool): Whether to use depthwise separable convolution
+            in blocks. Default: False
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ expand_ratio=0.5,
+ num_blocks=1,
+ add_identity=True,
+ use_depthwise=False,
+ act="leaky_relu"):
+ super().__init__()
+ mid_channels = int(out_channels * expand_ratio)
+ self.main_conv = ConvBNLayer(in_channels, mid_channels, 1, act=act)
+ self.short_conv = ConvBNLayer(in_channels, mid_channels, 1, act=act)
+ self.final_conv = ConvBNLayer(
+ 2 * mid_channels, out_channels, 1, act=act)
+
+        self.blocks = nn.Sequential(*[
+ DarknetBottleneck(
+ mid_channels,
+ mid_channels,
+ kernel_size,
+ 1.0,
+ add_identity,
+ use_depthwise,
+ act=act) for _ in range(num_blocks)
+ ])
+
+ def forward(self, x):
+ x_short = self.short_conv(x)
+
+ x_main = self.main_conv(x)
+ x_main = self.blocks(x_main)
+
+ x_final = paddle.concat((x_main, x_short), axis=1)
+ return self.final_conv(x_final)
+
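+
+# Editor's sketch, not part of the original file: CSPLayer splits the input
+# into a main branch (bottlenecks) and a shortcut branch, then concatenates
+# and fuses them with a 1x1 conv. `_demo_csp_layer` is a hypothetical name.
+def _demo_csp_layer():  # pragma: no cover - illustrative only
+    layer = CSPLayer(64, 64, kernel_size=3, num_blocks=1)
+    y = layer(paddle.rand([2, 64, 16, 16]))
+    assert y.shape == [2, 64, 16, 16]
+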
+
+class Channel_T(nn.Layer):
+ def __init__(self,
+ in_channels=[116, 232, 464],
+ out_channels=96,
+ act="leaky_relu"):
+ super(Channel_T, self).__init__()
+ self.convs = nn.LayerList()
+ for i in range(len(in_channels)):
+ self.convs.append(
+ ConvBNLayer(
+ in_channels[i], out_channels, 1, act=act))
+
+ def forward(self, x):
+ outs = [self.convs[i](x[i]) for i in range(len(x))]
+ return outs
+
+
+@register
+@serializable
+class CSPPAN(nn.Layer):
+ """Path Aggregation Network with CSP module.
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale)
+ kernel_size (int): The conv2d kernel size of this Module.
+ num_features (int): Number of output features of CSPPAN module.
+ num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 1
+        use_depthwise (bool): Whether to use depthwise separable convolution
+            in blocks. Default: True
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=5,
+ num_features=3,
+ num_csp_blocks=1,
+ use_depthwise=True,
+ act='hard_swish',
+ spatial_scales=[0.125, 0.0625, 0.03125]):
+ super(CSPPAN, self).__init__()
+ self.conv_t = Channel_T(in_channels, out_channels, act=act)
+ in_channels = [out_channels] * len(spatial_scales)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.spatial_scales = spatial_scales
+ self.num_features = num_features
+ conv_func = DPModule if use_depthwise else ConvBNLayer
+
+ if self.num_features == 4:
+ self.first_top_conv = conv_func(
+ in_channels[0], in_channels[0], kernel_size, stride=2, act=act)
+ self.second_top_conv = conv_func(
+ in_channels[0], in_channels[0], kernel_size, stride=2, act=act)
+            self.spatial_scales = self.spatial_scales + [
+                self.spatial_scales[-1] / 2
+            ]
+
+ # build top-down blocks
+ self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
+ self.top_down_blocks = nn.LayerList()
+ for idx in range(len(in_channels) - 1, 0, -1):
+ self.top_down_blocks.append(
+ CSPLayer(
+ in_channels[idx - 1] * 2,
+ in_channels[idx - 1],
+ kernel_size=kernel_size,
+ num_blocks=num_csp_blocks,
+ add_identity=False,
+ use_depthwise=use_depthwise,
+ act=act))
+
+ # build bottom-up blocks
+ self.downsamples = nn.LayerList()
+ self.bottom_up_blocks = nn.LayerList()
+ for idx in range(len(in_channels) - 1):
+ self.downsamples.append(
+ conv_func(
+ in_channels[idx],
+ in_channels[idx],
+ kernel_size=kernel_size,
+ stride=2,
+ act=act))
+ self.bottom_up_blocks.append(
+ CSPLayer(
+ in_channels[idx] * 2,
+ in_channels[idx + 1],
+ kernel_size=kernel_size,
+ num_blocks=num_csp_blocks,
+ add_identity=False,
+ use_depthwise=use_depthwise,
+ act=act))
+
+ def forward(self, inputs):
+ """
+ Args:
+ inputs (tuple[Tensor]): input features.
+
+ Returns:
+ tuple[Tensor]: CSPPAN features.
+ """
+ assert len(inputs) == len(self.in_channels)
+ inputs = self.conv_t(inputs)
+
+ # top-down path
+ inner_outs = [inputs[-1]]
+ for idx in range(len(self.in_channels) - 1, 0, -1):
+            feat_high = inner_outs[0]
+            feat_low = inputs[idx - 1]
+
+            upsample_feat = self.upsample(feat_high)
+
+ inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
+ paddle.concat([upsample_feat, feat_low], 1))
+ inner_outs.insert(0, inner_out)
+
+ # bottom-up path
+ outs = [inner_outs[0]]
+ for idx in range(len(self.in_channels) - 1):
+ feat_low = outs[-1]
+            feat_high = inner_outs[idx + 1]
+            downsample_feat = self.downsamples[idx](feat_low)
+            out = self.bottom_up_blocks[idx](paddle.concat(
+                [downsample_feat, feat_high], 1))
+ outs.append(out)
+
+ top_features = None
+ if self.num_features == 4:
+ top_features = self.first_top_conv(inputs[-1])
+ top_features = top_features + self.second_top_conv(outs[-1])
+ outs.append(top_features)
+
+ return tuple(outs)
+
+ @property
+ def out_shape(self):
+ return [
+ ShapeSpec(
+ channels=self.out_channels, stride=1. / s)
+ for s in self.spatial_scales
+ ]
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape], }
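+
+
+# Editor's sketch, not part of the original file: with a three-level input
+# (strides 8/16/32; the defaults look like ShuffleNetV2 channels), CSPPAN maps
+# every level to `out_channels` and keeps the spatial sizes. `_demo_csppan`
+# is a hypothetical name; it assumes a working Paddle runtime.
+def _demo_csppan():  # pragma: no cover - illustrative only
+    pan = CSPPAN(in_channels=[116, 232, 464], out_channels=96)
+    feats = [
+        paddle.rand([2, 116, 40, 40]), paddle.rand([2, 232, 20, 20]),
+        paddle.rand([2, 464, 10, 10])
+    ]
+    outs = pan(feats)
+    assert [o.shape for o in outs] == [[2, 96, 40, 40], [2, 96, 20, 20],
+                                       [2, 96, 10, 10]]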
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/fpn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/fpn.py
new file mode 100644
index 000000000..0633fb5b2
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/fpn.py
@@ -0,0 +1,231 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import XavierUniform
+
+from ppdet.core.workspace import register, serializable
+from ppdet.modeling.layers import ConvNormLayer
+from ..shape_spec import ShapeSpec
+
+__all__ = ['FPN']
+
+
+@register
+@serializable
+class FPN(nn.Layer):
+ """
+ Feature Pyramid Network, see https://arxiv.org/abs/1612.03144
+
+ Args:
+ in_channels (list[int]): input channels of each level which can be
+ derived from the output shape of backbone by from_config
+        out_channel (int): output channel of each level
+ spatial_scales (list[float]): the spatial scales between input feature
+ maps and original input image which can be derived from the output
+ shape of backbone by from_config
+ has_extra_convs (bool): whether to add extra conv to the last level.
+ default False
+ extra_stage (int): the number of extra stages added to the last level.
+ default 1
+ use_c5 (bool): Whether to use c5 as the input of extra stage,
+ otherwise p5 is used. default True
+ norm_type (string|None): The normalization type in FPN module. If
+ norm_type is None, norm will not be used after conv and if
+ norm_type is string, bn, gn, sync_bn are available. default None
+ norm_decay (float): weight decay for normalization layer weights.
+ default 0.
+ freeze_norm (bool): whether to freeze normalization layer.
+ default False
+        relu_before_extra_convs (bool): whether to add relu before extra convs.
+            default True
+
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channel,
+ spatial_scales=[0.25, 0.125, 0.0625, 0.03125],
+ has_extra_convs=False,
+ extra_stage=1,
+ use_c5=True,
+ norm_type=None,
+ norm_decay=0.,
+ freeze_norm=False,
+ relu_before_extra_convs=True):
+ super(FPN, self).__init__()
+ self.out_channel = out_channel
+ for s in range(extra_stage):
+ spatial_scales = spatial_scales + [spatial_scales[-1] / 2.]
+ self.spatial_scales = spatial_scales
+ self.has_extra_convs = has_extra_convs
+ self.extra_stage = extra_stage
+ self.use_c5 = use_c5
+ self.relu_before_extra_convs = relu_before_extra_convs
+ self.norm_type = norm_type
+ self.norm_decay = norm_decay
+ self.freeze_norm = freeze_norm
+
+ self.lateral_convs = []
+ self.fpn_convs = []
+ fan = out_channel * 3 * 3
+
+ # stage index 0,1,2,3 stands for res2,res3,res4,res5 on ResNet Backbone
+ # 0 <= st_stage < ed_stage <= 3
+ st_stage = 4 - len(in_channels)
+ ed_stage = st_stage + len(in_channels) - 1
+ for i in range(st_stage, ed_stage + 1):
+ if i == 3:
+ lateral_name = 'fpn_inner_res5_sum'
+ else:
+ lateral_name = 'fpn_inner_res{}_sum_lateral'.format(i + 2)
+ in_c = in_channels[i - st_stage]
+ if self.norm_type is not None:
+ lateral = self.add_sublayer(
+ lateral_name,
+ ConvNormLayer(
+ ch_in=in_c,
+ ch_out=out_channel,
+ filter_size=1,
+ stride=1,
+ norm_type=self.norm_type,
+ norm_decay=self.norm_decay,
+ freeze_norm=self.freeze_norm,
+ initializer=XavierUniform(fan_out=in_c)))
+ else:
+ lateral = self.add_sublayer(
+ lateral_name,
+ nn.Conv2D(
+ in_channels=in_c,
+ out_channels=out_channel,
+ kernel_size=1,
+ weight_attr=ParamAttr(
+ initializer=XavierUniform(fan_out=in_c))))
+ self.lateral_convs.append(lateral)
+
+ fpn_name = 'fpn_res{}_sum'.format(i + 2)
+ if self.norm_type is not None:
+ fpn_conv = self.add_sublayer(
+ fpn_name,
+ ConvNormLayer(
+ ch_in=out_channel,
+ ch_out=out_channel,
+ filter_size=3,
+ stride=1,
+ norm_type=self.norm_type,
+ norm_decay=self.norm_decay,
+ freeze_norm=self.freeze_norm,
+ initializer=XavierUniform(fan_out=fan)))
+ else:
+ fpn_conv = self.add_sublayer(
+ fpn_name,
+ nn.Conv2D(
+ in_channels=out_channel,
+ out_channels=out_channel,
+ kernel_size=3,
+ padding=1,
+ weight_attr=ParamAttr(
+ initializer=XavierUniform(fan_out=fan))))
+ self.fpn_convs.append(fpn_conv)
+
+ # add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5)
+ if self.has_extra_convs:
+ for i in range(self.extra_stage):
+ lvl = ed_stage + 1 + i
+ if i == 0 and self.use_c5:
+ in_c = in_channels[-1]
+ else:
+ in_c = out_channel
+ extra_fpn_name = 'fpn_{}'.format(lvl + 2)
+ if self.norm_type is not None:
+ extra_fpn_conv = self.add_sublayer(
+ extra_fpn_name,
+ ConvNormLayer(
+ ch_in=in_c,
+ ch_out=out_channel,
+ filter_size=3,
+ stride=2,
+ norm_type=self.norm_type,
+ norm_decay=self.norm_decay,
+ freeze_norm=self.freeze_norm,
+ initializer=XavierUniform(fan_out=fan)))
+ else:
+ extra_fpn_conv = self.add_sublayer(
+ extra_fpn_name,
+ nn.Conv2D(
+ in_channels=in_c,
+ out_channels=out_channel,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ weight_attr=ParamAttr(
+ initializer=XavierUniform(fan_out=fan))))
+ self.fpn_convs.append(extra_fpn_conv)
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {
+ 'in_channels': [i.channels for i in input_shape],
+ 'spatial_scales': [1.0 / i.stride for i in input_shape],
+ }
+
+ def forward(self, body_feats):
+ laterals = []
+ num_levels = len(body_feats)
+ for i in range(num_levels):
+ laterals.append(self.lateral_convs[i](body_feats[i]))
+
+ for i in range(1, num_levels):
+ lvl = num_levels - i
+ upsample = F.interpolate(
+ laterals[lvl],
+ scale_factor=2.,
+ mode='nearest', )
+ laterals[lvl - 1] += upsample
+
+ fpn_output = []
+ for lvl in range(num_levels):
+ fpn_output.append(self.fpn_convs[lvl](laterals[lvl]))
+
+ if self.extra_stage > 0:
+ # use max pool to get more levels on top of outputs (Faster R-CNN, Mask R-CNN)
+ if not self.has_extra_convs:
+                assert self.extra_stage == 1, 'extra_stage should be 1 if FPN does not have extra convs'
+ fpn_output.append(F.max_pool2d(fpn_output[-1], 1, stride=2))
+ # add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5)
+ else:
+ if self.use_c5:
+ extra_source = body_feats[-1]
+ else:
+ extra_source = fpn_output[-1]
+ fpn_output.append(self.fpn_convs[num_levels](extra_source))
+
+ for i in range(1, self.extra_stage):
+ if self.relu_before_extra_convs:
+ fpn_output.append(self.fpn_convs[num_levels + i](F.relu(
+ fpn_output[-1])))
+ else:
+ fpn_output.append(self.fpn_convs[num_levels + i](
+ fpn_output[-1]))
+ return fpn_output
+
+ @property
+ def out_shape(self):
+ return [
+ ShapeSpec(
+ channels=self.out_channel, stride=1. / s)
+ for s in self.spatial_scales
+ ]
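+
+
+# Editor's sketch, not part of the original file: with four ResNet levels and
+# the default extra max-pool stage, FPN returns five levels (P2-P6) that all
+# have `out_channel` channels. `_demo_fpn` is a hypothetical name; the local
+# `import paddle` is only needed because this module imports just paddle.nn.
+def _demo_fpn():  # pragma: no cover - illustrative only
+    import paddle
+    fpn = FPN(in_channels=[256, 512, 1024, 2048], out_channel=256)
+    body_feats = [
+        paddle.rand([2, 256, 64, 64]), paddle.rand([2, 512, 32, 32]),
+        paddle.rand([2, 1024, 16, 16]), paddle.rand([2, 2048, 8, 8])
+    ]
+    outs = fpn(body_feats)
+    # P6 comes from a stride-2 max pool over P5
+    assert len(outs) == 5 and outs[-1].shape == [2, 256, 4, 4]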
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/hrfpn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/hrfpn.py
new file mode 100644
index 000000000..eb4768b8e
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/hrfpn.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn.functional as F
+import paddle.nn as nn
+from ppdet.core.workspace import register
+from ..shape_spec import ShapeSpec
+
+__all__ = ['HRFPN']
+
+
+@register
+class HRFPN(nn.Layer):
+ """
+ Args:
+ in_channels (list): number of input feature channels from backbone
+ out_channel (int): number of output feature channels
+ share_conv (bool): whether to share conv for different layers' reduction
+ extra_stage (int): add extra stage for returning HRFPN fpn_feats
+ spatial_scales (list): feature map scaling factor
+ """
+
+ def __init__(self,
+ in_channels=[18, 36, 72, 144],
+ out_channel=256,
+ share_conv=False,
+ extra_stage=1,
+ spatial_scales=[1. / 4, 1. / 8, 1. / 16, 1. / 32]):
+ super(HRFPN, self).__init__()
+ in_channel = sum(in_channels)
+ self.in_channel = in_channel
+ self.out_channel = out_channel
+ self.share_conv = share_conv
+ for i in range(extra_stage):
+ spatial_scales = spatial_scales + [spatial_scales[-1] / 2.]
+ self.spatial_scales = spatial_scales
+ self.num_out = len(self.spatial_scales)
+
+ self.reduction = nn.Conv2D(
+ in_channels=in_channel,
+ out_channels=out_channel,
+ kernel_size=1,
+ bias_attr=False)
+
+ if share_conv:
+ self.fpn_conv = nn.Conv2D(
+ in_channels=out_channel,
+ out_channels=out_channel,
+ kernel_size=3,
+ padding=1,
+ bias_attr=False)
+ else:
+ self.fpn_conv = []
+ for i in range(self.num_out):
+ conv_name = "fpn_conv_" + str(i)
+ conv = self.add_sublayer(
+ conv_name,
+ nn.Conv2D(
+ in_channels=out_channel,
+ out_channels=out_channel,
+ kernel_size=3,
+ padding=1,
+ bias_attr=False))
+ self.fpn_conv.append(conv)
+
+ def forward(self, body_feats):
+ num_backbone_stages = len(body_feats)
+
+ outs = []
+ outs.append(body_feats[0])
+
+ # resize
+ for i in range(1, num_backbone_stages):
+ resized = F.interpolate(
+ body_feats[i], scale_factor=2**i, mode='bilinear')
+ outs.append(resized)
+
+ # concat
+ out = paddle.concat(outs, axis=1)
+        assert out.shape[
+            1] == self.in_channel, 'in_channel should be {}, but received {}'.format(
+                self.in_channel, out.shape[1])
+
+ # reduction
+ out = self.reduction(out)
+
+ # conv
+ outs = [out]
+ for i in range(1, self.num_out):
+ outs.append(F.avg_pool2d(out, kernel_size=2**i, stride=2**i))
+ outputs = []
+
+ for i in range(self.num_out):
+ conv_func = self.fpn_conv if self.share_conv else self.fpn_conv[i]
+ conv = conv_func(outs[i])
+ outputs.append(conv)
+
+ fpn_feats = [outputs[k] for k in range(self.num_out)]
+ return fpn_feats
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {
+ 'in_channels': [i.channels for i in input_shape],
+ 'spatial_scales': [1.0 / i.stride for i in input_shape],
+ }
+
+ @property
+ def out_shape(self):
+ return [
+ ShapeSpec(
+ channels=self.out_channel, stride=1. / s)
+ for s in self.spatial_scales
+ ]
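+
+
+# Editor's sketch, not part of the original file: HRFPN upsamples every HRNet
+# branch to the highest resolution, concatenates them, reduces the channels,
+# and then builds lower levels with average pooling. `_demo_hrfpn` is a
+# hypothetical name.
+def _demo_hrfpn():  # pragma: no cover - illustrative only
+    fpn = HRFPN(in_channels=[18, 36, 72, 144], out_channel=64)
+    feats = [
+        paddle.rand([2, 18, 32, 32]), paddle.rand([2, 36, 16, 16]),
+        paddle.rand([2, 72, 8, 8]), paddle.rand([2, 144, 4, 4])
+    ]
+    outs = fpn(feats)
+    # 4 input scales + 1 extra stage = 5 outputs, finest level first
+    assert len(outs) == 5 and outs[0].shape == [2, 64, 32, 32]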
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/ttf_fpn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/ttf_fpn.py
new file mode 100644
index 000000000..60cc69f80
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/ttf_fpn.py
@@ -0,0 +1,242 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import Constant, Uniform, Normal, XavierUniform
+from ppdet.core.workspace import register, serializable
+from paddle.regularizer import L2Decay
+from ppdet.modeling.layers import DeformableConvV2, ConvNormLayer, LiteConv
+import math
+from ppdet.modeling.ops import batch_norm
+from ..shape_spec import ShapeSpec
+
+__all__ = ['TTFFPN']
+
+
+class Upsample(nn.Layer):
+ def __init__(self, ch_in, ch_out, norm_type='bn'):
+ super(Upsample, self).__init__()
+ fan_in = ch_in * 3 * 3
+ stdv = 1. / math.sqrt(fan_in)
+ self.dcn = DeformableConvV2(
+ ch_in,
+ ch_out,
+ kernel_size=3,
+ weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
+ bias_attr=ParamAttr(
+ initializer=Constant(0),
+ regularizer=L2Decay(0.),
+ learning_rate=2.),
+ lr_scale=2.,
+ regularizer=L2Decay(0.))
+
+ self.bn = batch_norm(
+ ch_out, norm_type=norm_type, initializer=Constant(1.))
+
+ def forward(self, feat):
+ dcn = self.dcn(feat)
+ bn = self.bn(dcn)
+ relu = F.relu(bn)
+ out = F.interpolate(relu, scale_factor=2., mode='bilinear')
+ return out
+
+
+class DeConv(nn.Layer):
+ def __init__(self, ch_in, ch_out, norm_type='bn'):
+ super(DeConv, self).__init__()
+ self.deconv = nn.Sequential()
+ conv1 = ConvNormLayer(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ stride=1,
+ filter_size=1,
+ norm_type=norm_type,
+ initializer=XavierUniform())
+ conv2 = nn.Conv2DTranspose(
+ in_channels=ch_out,
+ out_channels=ch_out,
+ kernel_size=4,
+ padding=1,
+ stride=2,
+ groups=ch_out,
+ weight_attr=ParamAttr(initializer=XavierUniform()),
+ bias_attr=False)
+ bn = batch_norm(ch_out, norm_type=norm_type, norm_decay=0.)
+ conv3 = ConvNormLayer(
+ ch_in=ch_out,
+ ch_out=ch_out,
+ stride=1,
+ filter_size=1,
+ norm_type=norm_type,
+ initializer=XavierUniform())
+
+ self.deconv.add_sublayer('conv1', conv1)
+ self.deconv.add_sublayer('relu6_1', nn.ReLU6())
+ self.deconv.add_sublayer('conv2', conv2)
+ self.deconv.add_sublayer('bn', bn)
+ self.deconv.add_sublayer('relu6_2', nn.ReLU6())
+ self.deconv.add_sublayer('conv3', conv3)
+ self.deconv.add_sublayer('relu6_3', nn.ReLU6())
+
+ def forward(self, inputs):
+ return self.deconv(inputs)
+
+
+class LiteUpsample(nn.Layer):
+ def __init__(self, ch_in, ch_out, norm_type='bn'):
+ super(LiteUpsample, self).__init__()
+ self.deconv = DeConv(ch_in, ch_out, norm_type=norm_type)
+ self.conv = LiteConv(ch_in, ch_out, norm_type=norm_type)
+
+ def forward(self, inputs):
+ deconv_up = self.deconv(inputs)
+ conv = self.conv(inputs)
+ interp_up = F.interpolate(conv, scale_factor=2., mode='bilinear')
+ return deconv_up + interp_up
+
+
+class ShortCut(nn.Layer):
+ def __init__(self,
+ layer_num,
+ ch_in,
+ ch_out,
+ norm_type='bn',
+ lite_neck=False,
+ name=None):
+ super(ShortCut, self).__init__()
+ shortcut_conv = nn.Sequential()
+ for i in range(layer_num):
+ fan_out = 3 * 3 * ch_out
+ std = math.sqrt(2. / fan_out)
+ in_channels = ch_in if i == 0 else ch_out
+ shortcut_name = name + '.conv.{}'.format(i)
+ if lite_neck:
+ shortcut_conv.add_sublayer(
+ shortcut_name,
+ LiteConv(
+ in_channels=in_channels,
+ out_channels=ch_out,
+ with_act=i < layer_num - 1,
+ norm_type=norm_type))
+ else:
+ shortcut_conv.add_sublayer(
+ shortcut_name,
+ nn.Conv2D(
+ in_channels=in_channels,
+ out_channels=ch_out,
+ kernel_size=3,
+ padding=1,
+ weight_attr=ParamAttr(initializer=Normal(0, std)),
+ bias_attr=ParamAttr(
+ learning_rate=2., regularizer=L2Decay(0.))))
+ if i < layer_num - 1:
+ shortcut_conv.add_sublayer(shortcut_name + '.act',
+ nn.ReLU())
+ self.shortcut = self.add_sublayer('shortcut', shortcut_conv)
+
+ def forward(self, feat):
+ out = self.shortcut(feat)
+ return out
+
+
+@register
+@serializable
+class TTFFPN(nn.Layer):
+ """
+ Args:
+ in_channels (list): number of input feature channels from backbone.
+ [128,256,512,1024] by default, means the channels of DarkNet53
+ backbone return_idx [1,2,3,4].
+ planes (list): the number of output feature channels of FPN.
+ [256, 128, 64] by default
+ shortcut_num (list): the number of convolution layers in each shortcut.
+ [3,2,1] by default, means DarkNet53 backbone return_idx_1 has 3 convs
+ in its shortcut, return_idx_2 has 2 convs and return_idx_3 has 1 conv.
+ norm_type (string): norm type, 'sync_bn', 'bn', 'gn' are optional.
+ bn by default
+ lite_neck (bool): whether to use lite conv in TTFNet FPN,
+ False by default
+ fusion_method (string): the method to fusion upsample and lateral layer.
+ 'add' and 'concat' are optional, add by default
+ """
+
+ __shared__ = ['norm_type']
+
+ def __init__(self,
+ in_channels,
+ planes=[256, 128, 64],
+ shortcut_num=[3, 2, 1],
+ norm_type='bn',
+ lite_neck=False,
+ fusion_method='add'):
+ super(TTFFPN, self).__init__()
+ self.planes = planes
+ self.shortcut_num = shortcut_num[::-1]
+ self.shortcut_len = len(shortcut_num)
+ self.ch_in = in_channels[::-1]
+ self.fusion_method = fusion_method
+
+ self.upsample_list = []
+ self.shortcut_list = []
+ self.upper_list = []
+ for i, out_c in enumerate(self.planes):
+ in_c = self.ch_in[i] if i == 0 else self.upper_list[-1]
+ upsample_module = LiteUpsample if lite_neck else Upsample
+ upsample = self.add_sublayer(
+ 'upsample.' + str(i),
+ upsample_module(
+ in_c, out_c, norm_type=norm_type))
+ self.upsample_list.append(upsample)
+ if i < self.shortcut_len:
+ shortcut = self.add_sublayer(
+ 'shortcut.' + str(i),
+ ShortCut(
+ self.shortcut_num[i],
+ self.ch_in[i + 1],
+ out_c,
+ norm_type=norm_type,
+ lite_neck=lite_neck,
+ name='shortcut.' + str(i)))
+ self.shortcut_list.append(shortcut)
+ if self.fusion_method == 'add':
+ upper_c = out_c
+ elif self.fusion_method == 'concat':
+ upper_c = out_c * 2
+ else:
+                raise ValueError(
+                    'Illegal fusion method. Expected add or concat, '
+                    'but received {}'.format(self.fusion_method))
+ self.upper_list.append(upper_c)
+
+ def forward(self, inputs):
+ feat = inputs[-1]
+ for i, out_c in enumerate(self.planes):
+ feat = self.upsample_list[i](feat)
+ if i < self.shortcut_len:
+ shortcut = self.shortcut_list[i](inputs[-i - 2])
+ if self.fusion_method == 'add':
+ feat = feat + shortcut
+ else:
+ feat = paddle.concat([feat, shortcut], axis=1)
+ return feat
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape], }
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=self.upper_list[-1], )]
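+
+
+# Editor's sketch, not part of the original file: TTFFPN progressively
+# upsamples the deepest feature and fuses it with shortcut branches from the
+# shallower levels, returning a single stride-4 map. lite_neck=True is chosen
+# here only so the sketch runs without deformable-conv support;
+# `_demo_ttf_fpn` is a hypothetical name.
+def _demo_ttf_fpn():  # pragma: no cover - illustrative only
+    fpn = TTFFPN(in_channels=[128, 256, 512, 1024], lite_neck=True)
+    feats = [
+        paddle.rand([2, 128, 32, 32]), paddle.rand([2, 256, 16, 16]),
+        paddle.rand([2, 512, 8, 8]), paddle.rand([2, 1024, 4, 4])
+    ]
+    out = fpn(feats)
+    # planes [256, 128, 64]: the last upsample stage emits 64 channels
+    assert out.shape == [2, 64, 32, 32]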
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/yolo_fpn.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/yolo_fpn.py
new file mode 100644
index 000000000..4af0348d2
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/necks/yolo_fpn.py
@@ -0,0 +1,988 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register, serializable
+from ppdet.modeling.layers import DropBlock
+from ..backbones.darknet import ConvBNLayer
+from ..shape_spec import ShapeSpec
+
+__all__ = ['YOLOv3FPN', 'PPYOLOFPN', 'PPYOLOTinyFPN', 'PPYOLOPAN']
+
+
+def add_coord(x, data_format):
+ b = paddle.shape(x)[0]
+ if data_format == 'NCHW':
+ h, w = x.shape[2], x.shape[3]
+ else:
+ h, w = x.shape[1], x.shape[2]
+
+    # normalize coordinates to [-1, 1] along each axis (CoordConv convention)
+    gx = paddle.cast(paddle.arange(w) / (w - 1.) * 2.0 - 1., x.dtype)
+    gy = paddle.cast(paddle.arange(h) / (h - 1.) * 2.0 - 1., x.dtype)
+
+ if data_format == 'NCHW':
+ gx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w])
+ gy = gy.reshape([1, 1, h, 1]).expand([b, 1, h, w])
+ else:
+ gx = gx.reshape([1, 1, w, 1]).expand([b, h, w, 1])
+ gy = gy.reshape([1, h, 1, 1]).expand([b, h, w, 1])
+
+ gx.stop_gradient = True
+ gy.stop_gradient = True
+ return gx, gy
+
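+
+# Editor's sketch, not part of the original file: add_coord builds the two
+# normalized coordinate grids that CoordConv concatenates to its input.
+# `_demo_add_coord` is a hypothetical name.
+def _demo_add_coord():  # pragma: no cover - illustrative only
+    x = paddle.rand([2, 16, 8, 8])
+    gx, gy = add_coord(x, 'NCHW')
+    # one-channel grids broadcast over the batch dimension
+    assert gx.shape == [2, 1, 8, 8] and gy.shape == [2, 1, 8, 8]
+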
+
+class YoloDetBlock(nn.Layer):
+ def __init__(self,
+ ch_in,
+ channel,
+ norm_type,
+ freeze_norm=False,
+ name='',
+ data_format='NCHW'):
+ """
+        YoloDetBlock layer for yolov3, see https://arxiv.org/abs/1804.02767
+
+ Args:
+ ch_in (int): input channel
+ channel (int): base channel
+ norm_type (str): batch norm type
+ freeze_norm (bool): whether to freeze norm, default False
+ name (str): layer name
+ data_format (str): data format, NCHW or NHWC
+ """
+ super(YoloDetBlock, self).__init__()
+ self.ch_in = ch_in
+ self.channel = channel
+ assert channel % 2 == 0, \
+ "channel {} cannot be divided by 2".format(channel)
+ conv_def = [
+ ['conv0', ch_in, channel, 1, '.0.0'],
+ ['conv1', channel, channel * 2, 3, '.0.1'],
+ ['conv2', channel * 2, channel, 1, '.1.0'],
+ ['conv3', channel, channel * 2, 3, '.1.1'],
+ ['route', channel * 2, channel, 1, '.2'],
+ ]
+
+ self.conv_module = nn.Sequential()
+ for idx, (conv_name, ch_in, ch_out, filter_size,
+ post_name) in enumerate(conv_def):
+ self.conv_module.add_sublayer(
+ conv_name,
+ ConvBNLayer(
+ ch_in=ch_in,
+ ch_out=ch_out,
+ filter_size=filter_size,
+ padding=(filter_size - 1) // 2,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ data_format=data_format,
+ name=name + post_name))
+
+ self.tip = ConvBNLayer(
+ ch_in=channel,
+ ch_out=channel * 2,
+ filter_size=3,
+ padding=1,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ data_format=data_format,
+ name=name + '.tip')
+
+ def forward(self, inputs):
+ route = self.conv_module(inputs)
+ tip = self.tip(route)
+ return route, tip
+
+
+class SPP(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ k,
+ pool_size,
+ norm_type,
+ freeze_norm=False,
+ name='',
+ act='leaky',
+ data_format='NCHW'):
+ """
+        SPP layer, which consists of several pooling layers followed by a conv layer
+
+ Args:
+ ch_in (int): input channel of conv layer
+ ch_out (int): output channel of conv layer
+            k (int): kernel size of conv layer
+            pool_size (list): kernel sizes of the pooling layers
+            norm_type (str): batch norm type
+ freeze_norm (bool): whether to freeze norm, default False
+ name (str): layer name
+ act (str): activation function
+ data_format (str): data format, NCHW or NHWC
+ """
+ super(SPP, self).__init__()
+ self.pool = []
+ self.data_format = data_format
+ for size in pool_size:
+ pool = self.add_sublayer(
+                '{}.pool{}'.format(name, size),
+ nn.MaxPool2D(
+ kernel_size=size,
+ stride=1,
+ padding=size // 2,
+ data_format=data_format,
+ ceil_mode=False))
+ self.pool.append(pool)
+ self.conv = ConvBNLayer(
+ ch_in,
+ ch_out,
+ k,
+ padding=k // 2,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ name=name,
+ act=act,
+ data_format=data_format)
+
+ def forward(self, x):
+ outs = [x]
+ for pool in self.pool:
+ outs.append(pool(x))
+ if self.data_format == "NCHW":
+ y = paddle.concat(outs, axis=1)
+ else:
+ y = paddle.concat(outs, axis=-1)
+
+ y = self.conv(y)
+ return y
+
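+
+# Editor's sketch, not part of the original file: with stride-1 pooling and
+# half-kernel padding, every SPP branch keeps the spatial size, so the concat
+# feeding the conv has (len(pool_size) + 1) * C channels. `_demo_spp` is a
+# hypothetical name.
+def _demo_spp():  # pragma: no cover - illustrative only
+    spp = SPP(512 * 4, 512, k=1, pool_size=[5, 9, 13], norm_type='bn')
+    y = spp(paddle.rand([2, 512, 13, 13]))
+    assert y.shape == [2, 512, 13, 13]
+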
+
+class CoordConv(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ filter_size,
+ padding,
+ norm_type,
+ freeze_norm=False,
+ name='',
+ data_format='NCHW'):
+ """
+ CoordConv layer, see https://arxiv.org/abs/1807.03247
+
+ Args:
+ ch_in (int): input channel
+ ch_out (int): output channel
+            filter_size (int): filter size
+            padding (int): padding size
+ norm_type (str): batch norm type, default bn
+ name (str): layer name
+ data_format (str): data format, NCHW or NHWC
+
+ """
+ super(CoordConv, self).__init__()
+ self.conv = ConvBNLayer(
+ ch_in + 2,
+ ch_out,
+ filter_size=filter_size,
+ padding=padding,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ data_format=data_format,
+ name=name)
+ self.data_format = data_format
+
+ def forward(self, x):
+ gx, gy = add_coord(x, self.data_format)
+ if self.data_format == 'NCHW':
+ y = paddle.concat([x, gx, gy], axis=1)
+ else:
+ y = paddle.concat([x, gx, gy], axis=-1)
+ y = self.conv(y)
+ return y
+
+
+class PPYOLODetBlock(nn.Layer):
+ def __init__(self, cfg, name, data_format='NCHW'):
+ """
+ PPYOLODetBlock layer
+
+ Args:
+ cfg (list): layer configs for this block
+ name (str): block name
+ data_format (str): data format, NCHW or NHWC
+ """
+ super(PPYOLODetBlock, self).__init__()
+ self.conv_module = nn.Sequential()
+ for idx, (conv_name, layer, args, kwargs) in enumerate(cfg[:-1]):
+ kwargs.update(
+ name='{}.{}'.format(name, conv_name), data_format=data_format)
+ self.conv_module.add_sublayer(conv_name, layer(*args, **kwargs))
+
+ conv_name, layer, args, kwargs = cfg[-1]
+ kwargs.update(
+ name='{}.{}'.format(name, conv_name), data_format=data_format)
+ self.tip = layer(*args, **kwargs)
+
+ def forward(self, inputs):
+ route = self.conv_module(inputs)
+ tip = self.tip(route)
+ return route, tip
+
+
+class PPYOLOTinyDetBlock(nn.Layer):
+ def __init__(self,
+ ch_in,
+ ch_out,
+ name,
+ drop_block=False,
+ block_size=3,
+ keep_prob=0.9,
+ data_format='NCHW'):
+ """
+ PPYOLO Tiny DetBlock layer
+ Args:
+            ch_in (int): input channel number
+            ch_out (int): output channel number
+            name (str): block name
+            drop_block (bool): whether to use DropBlock
+            block_size (int): drop block size
+            keep_prob (float): probability to keep a block in DropBlock
+            data_format (str): data format, NCHW or NHWC
+ """
+ super(PPYOLOTinyDetBlock, self).__init__()
+ self.drop_block_ = drop_block
+ self.conv_module = nn.Sequential()
+
+ cfgs = [
+ # name, in channels, out channels, filter_size,
+ # stride, padding, groups
+ ['.0', ch_in, ch_out, 1, 1, 0, 1],
+ ['.1', ch_out, ch_out, 5, 1, 2, ch_out],
+ ['.2', ch_out, ch_out, 1, 1, 0, 1],
+ ['.route', ch_out, ch_out, 5, 1, 2, ch_out],
+ ]
+ for cfg in cfgs:
+ conv_name, conv_ch_in, conv_ch_out, filter_size, stride, padding, \
+ groups = cfg
+ self.conv_module.add_sublayer(
+ name + conv_name,
+ ConvBNLayer(
+ ch_in=conv_ch_in,
+ ch_out=conv_ch_out,
+ filter_size=filter_size,
+ stride=stride,
+ padding=padding,
+ groups=groups,
+ name=name + conv_name))
+
+ self.tip = ConvBNLayer(
+ ch_in=ch_out,
+ ch_out=ch_out,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ groups=1,
+            name=name + '.tip')
+
+ if self.drop_block_:
+ self.drop_block = DropBlock(
+ block_size=block_size,
+ keep_prob=keep_prob,
+ data_format=data_format,
+ name=name + '.dropblock')
+
+ def forward(self, inputs):
+ if self.drop_block_:
+ inputs = self.drop_block(inputs)
+ route = self.conv_module(inputs)
+ tip = self.tip(route)
+ return route, tip
+
+
+class PPYOLODetBlockCSP(nn.Layer):
+ def __init__(self,
+ cfg,
+ ch_in,
+ ch_out,
+ act,
+ norm_type,
+ name,
+ data_format='NCHW'):
+ """
+ PPYOLODetBlockCSP layer
+
+ Args:
+ cfg (list): layer configs for this block
+ ch_in (int): input channel
+ ch_out (int): output channel
+ act (str): default mish
+ name (str): block name
+ data_format (str): data format, NCHW or NHWC
+ """
+ super(PPYOLODetBlockCSP, self).__init__()
+ self.data_format = data_format
+ self.conv1 = ConvBNLayer(
+ ch_in,
+ ch_out,
+ 1,
+ padding=0,
+ act=act,
+ norm_type=norm_type,
+ name=name + '.left',
+ data_format=data_format)
+ self.conv2 = ConvBNLayer(
+ ch_in,
+ ch_out,
+ 1,
+ padding=0,
+ act=act,
+ norm_type=norm_type,
+ name=name + '.right',
+ data_format=data_format)
+ self.conv3 = ConvBNLayer(
+ ch_out * 2,
+ ch_out * 2,
+ 1,
+ padding=0,
+ act=act,
+ norm_type=norm_type,
+ name=name,
+ data_format=data_format)
+ self.conv_module = nn.Sequential()
+ for idx, (layer_name, layer, args, kwargs) in enumerate(cfg):
+ kwargs.update(name=name + layer_name, data_format=data_format)
+ self.conv_module.add_sublayer(layer_name, layer(*args, **kwargs))
+
+ def forward(self, inputs):
+ conv_left = self.conv1(inputs)
+ conv_right = self.conv2(inputs)
+ conv_left = self.conv_module(conv_left)
+ if self.data_format == 'NCHW':
+ conv = paddle.concat([conv_left, conv_right], axis=1)
+ else:
+ conv = paddle.concat([conv_left, conv_right], axis=-1)
+
+ conv = self.conv3(conv)
+ return conv, conv
+
+
+@register
+@serializable
+class YOLOv3FPN(nn.Layer):
+ __shared__ = ['norm_type', 'data_format']
+
+ def __init__(self,
+ in_channels=[256, 512, 1024],
+ norm_type='bn',
+ freeze_norm=False,
+ data_format='NCHW'):
+ """
+ YOLOv3FPN layer
+
+ Args:
+ in_channels (list): input channels for fpn
+ norm_type (str): batch norm type, default bn
+ data_format (str): data format, NCHW or NHWC
+
+ """
+ super(YOLOv3FPN, self).__init__()
+        assert len(in_channels) > 0, "in_channels length should be > 0"
+ self.in_channels = in_channels
+ self.num_blocks = len(in_channels)
+
+ self._out_channels = []
+ self.yolo_blocks = []
+ self.routes = []
+ self.data_format = data_format
+ for i in range(self.num_blocks):
+ name = 'yolo_block.{}'.format(i)
+ in_channel = in_channels[-i - 1]
+ if i > 0:
+ in_channel += 512 // (2**i)
+ yolo_block = self.add_sublayer(
+ name,
+ YoloDetBlock(
+ in_channel,
+ channel=512 // (2**i),
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ data_format=data_format,
+ name=name))
+ self.yolo_blocks.append(yolo_block)
+ # tip layer output channel doubled
+ self._out_channels.append(1024 // (2**i))
+
+ if i < self.num_blocks - 1:
+ name = 'yolo_transition.{}'.format(i)
+ route = self.add_sublayer(
+ name,
+ ConvBNLayer(
+ ch_in=512 // (2**i),
+ ch_out=256 // (2**i),
+ filter_size=1,
+ stride=1,
+ padding=0,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ data_format=data_format,
+ name=name))
+ self.routes.append(route)
+
+ def forward(self, blocks, for_mot=False):
+ assert len(blocks) == self.num_blocks
+ blocks = blocks[::-1]
+ yolo_feats = []
+
+ # add embedding features output for multi-object tracking model
+ if for_mot:
+ emb_feats = []
+
+ for i, block in enumerate(blocks):
+ if i > 0:
+ if self.data_format == 'NCHW':
+ block = paddle.concat([route, block], axis=1)
+ else:
+ block = paddle.concat([route, block], axis=-1)
+ route, tip = self.yolo_blocks[i](block)
+ yolo_feats.append(tip)
+
+ if for_mot:
+ # add embedding features output
+ emb_feats.append(route)
+
+ if i < self.num_blocks - 1:
+ route = self.routes[i](route)
+ route = F.interpolate(
+ route, scale_factor=2., data_format=self.data_format)
+
+ if for_mot:
+ return {'yolo_feats': yolo_feats, 'emb_feats': emb_feats}
+ else:
+ return yolo_feats
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape], }
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
+
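+
+# Editor's sketch, not part of the original file: YOLOv3FPN consumes backbone
+# features from shallow to deep and returns detection features deepest first.
+# `_demo_yolov3_fpn` is a hypothetical name.
+def _demo_yolov3_fpn():  # pragma: no cover - illustrative only
+    fpn = YOLOv3FPN(in_channels=[256, 512, 1024])
+    blocks = [
+        paddle.rand([2, 256, 32, 32]), paddle.rand([2, 512, 16, 16]),
+        paddle.rand([2, 1024, 8, 8])
+    ]
+    feats = fpn(blocks)
+    assert [f.shape for f in feats] == [[2, 1024, 8, 8], [2, 512, 16, 16],
+                                        [2, 256, 32, 32]]
+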
+
+@register
+@serializable
+class PPYOLOFPN(nn.Layer):
+ __shared__ = ['norm_type', 'data_format']
+
+ def __init__(self,
+ in_channels=[512, 1024, 2048],
+ norm_type='bn',
+ freeze_norm=False,
+ data_format='NCHW',
+ coord_conv=False,
+ conv_block_num=2,
+ drop_block=False,
+ block_size=3,
+ keep_prob=0.9,
+ spp=False):
+ """
+ PPYOLOFPN layer
+
+ Args:
+ in_channels (list): input channels for fpn
+ norm_type (str): batch norm type, default bn
+ data_format (str): data format, NCHW or NHWC
+ coord_conv (bool): whether use CoordConv or not
+ conv_block_num (int): conv block num of each pan block
+ drop_block (bool): whether use DropBlock or not
+ block_size (int): block size of DropBlock
+ keep_prob (float): keep probability of DropBlock
+ spp (bool): whether use spp or not
+
+ """
+ super(PPYOLOFPN, self).__init__()
+        assert len(in_channels) > 0, "in_channels length should be > 0"
+ self.in_channels = in_channels
+ self.num_blocks = len(in_channels)
+ # parse kwargs
+ self.coord_conv = coord_conv
+ self.drop_block = drop_block
+ self.block_size = block_size
+ self.keep_prob = keep_prob
+ self.spp = spp
+ self.conv_block_num = conv_block_num
+ self.data_format = data_format
+ if self.coord_conv:
+ ConvLayer = CoordConv
+ else:
+ ConvLayer = ConvBNLayer
+
+ if self.drop_block:
+ dropblock_cfg = [[
+ 'dropblock', DropBlock, [self.block_size, self.keep_prob],
+ dict()
+ ]]
+ else:
+ dropblock_cfg = []
+
+ self._out_channels = []
+ self.yolo_blocks = []
+ self.routes = []
+ for i, ch_in in enumerate(self.in_channels[::-1]):
+ if i > 0:
+ ch_in += 512 // (2**i)
+ channel = 64 * (2**self.num_blocks) // (2**i)
+ base_cfg = []
+ c_in, c_out = ch_in, channel
+ for j in range(self.conv_block_num):
+ base_cfg += [
+ [
+ 'conv{}'.format(2 * j), ConvLayer, [c_in, c_out, 1],
+ dict(
+ padding=0,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm)
+ ],
+ [
+ 'conv{}'.format(2 * j + 1), ConvBNLayer,
+ [c_out, c_out * 2, 3], dict(
+ padding=1,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm)
+ ],
+ ]
+ c_in, c_out = c_out * 2, c_out
+
+ base_cfg += [[
+ 'route', ConvLayer, [c_in, c_out, 1], dict(
+ padding=0, norm_type=norm_type, freeze_norm=freeze_norm)
+ ], [
+ 'tip', ConvLayer, [c_out, c_out * 2, 3], dict(
+ padding=1, norm_type=norm_type, freeze_norm=freeze_norm)
+ ]]
+
+ if self.conv_block_num == 2:
+ if i == 0:
+ if self.spp:
+ spp_cfg = [[
+ 'spp', SPP, [channel * 4, channel, 1], dict(
+ pool_size=[5, 9, 13],
+ norm_type=norm_type,
+ freeze_norm=freeze_norm)
+ ]]
+ else:
+ spp_cfg = []
+ cfg = base_cfg[0:3] + spp_cfg + base_cfg[
+ 3:4] + dropblock_cfg + base_cfg[4:6]
+ else:
+ cfg = base_cfg[0:2] + dropblock_cfg + base_cfg[2:6]
+ elif self.conv_block_num == 0:
+ if self.spp and i == 0:
+ spp_cfg = [[
+ 'spp', SPP, [c_in * 4, c_in, 1], dict(
+ pool_size=[5, 9, 13],
+ norm_type=norm_type,
+ freeze_norm=freeze_norm)
+ ]]
+ else:
+ spp_cfg = []
+ cfg = spp_cfg + dropblock_cfg + base_cfg
+ name = 'yolo_block.{}'.format(i)
+ yolo_block = self.add_sublayer(name, PPYOLODetBlock(cfg, name))
+ self.yolo_blocks.append(yolo_block)
+ self._out_channels.append(channel * 2)
+ if i < self.num_blocks - 1:
+ name = 'yolo_transition.{}'.format(i)
+ route = self.add_sublayer(
+ name,
+ ConvBNLayer(
+ ch_in=channel,
+ ch_out=256 // (2**i),
+ filter_size=1,
+ stride=1,
+ padding=0,
+ norm_type=norm_type,
+ freeze_norm=freeze_norm,
+ data_format=data_format,
+ name=name))
+ self.routes.append(route)
+
+ def forward(self, blocks, for_mot=False):
+ assert len(blocks) == self.num_blocks
+ blocks = blocks[::-1]
+ yolo_feats = []
+
+ # add embedding features output for multi-object tracking model
+ if for_mot:
+ emb_feats = []
+
+ for i, block in enumerate(blocks):
+ if i > 0:
+ if self.data_format == 'NCHW':
+ block = paddle.concat([route, block], axis=1)
+ else:
+ block = paddle.concat([route, block], axis=-1)
+ route, tip = self.yolo_blocks[i](block)
+ yolo_feats.append(tip)
+
+ if for_mot:
+ # add embedding features output
+ emb_feats.append(route)
+
+ if i < self.num_blocks - 1:
+ route = self.routes[i](route)
+ route = F.interpolate(
+ route, scale_factor=2., data_format=self.data_format)
+
+ if for_mot:
+ return {'yolo_feats': yolo_feats, 'emb_feats': emb_feats}
+ else:
+ return yolo_feats
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape], }
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
+
+
+@register
+@serializable
+class PPYOLOTinyFPN(nn.Layer):
+ __shared__ = ['norm_type', 'data_format']
+
+ def __init__(self,
+ in_channels=[80, 56, 34],
+ detection_block_channels=[160, 128, 96],
+ norm_type='bn',
+ data_format='NCHW',
+ **kwargs):
+ """
+ PPYOLO Tiny FPN layer
+ Args:
+ in_channels (list): input channels for fpn
+ detection_block_channels (list): channels in fpn
+ norm_type (str): batch norm type, default bn
+ data_format (str): data format, NCHW or NHWC
+            kwargs: extra key-value pairs, such as parameters of DropBlock and SPP
+ """
+ super(PPYOLOTinyFPN, self).__init__()
+        assert len(in_channels) > 0, "in_channels length should be > 0"
+        self.in_channels = in_channels[::-1]
+        assert len(detection_block_channels
+                   ) > 0, "detection_block_channels length should be > 0"
+ self.detection_block_channels = detection_block_channels
+ self.data_format = data_format
+ self.num_blocks = len(in_channels)
+ # parse kwargs
+ self.drop_block = kwargs.get('drop_block', False)
+ self.block_size = kwargs.get('block_size', 3)
+ self.keep_prob = kwargs.get('keep_prob', 0.9)
+
+ self.spp_ = kwargs.get('spp', False)
+ if self.spp_:
+ self.spp = SPP(self.in_channels[0] * 4,
+ self.in_channels[0],
+ k=1,
+ pool_size=[5, 9, 13],
+ norm_type=norm_type,
+ name='spp')
+
+ self._out_channels = []
+ self.yolo_blocks = []
+ self.routes = []
+ for i, (
+ ch_in, ch_out
+ ) in enumerate(zip(self.in_channels, self.detection_block_channels)):
+ name = 'yolo_block.{}'.format(i)
+ if i > 0:
+ ch_in += self.detection_block_channels[i - 1]
+ yolo_block = self.add_sublayer(
+ name,
+ PPYOLOTinyDetBlock(
+ ch_in,
+ ch_out,
+ name,
+ drop_block=self.drop_block,
+ block_size=self.block_size,
+ keep_prob=self.keep_prob))
+ self.yolo_blocks.append(yolo_block)
+ self._out_channels.append(ch_out)
+
+ if i < self.num_blocks - 1:
+ name = 'yolo_transition.{}'.format(i)
+ route = self.add_sublayer(
+ name,
+ ConvBNLayer(
+ ch_in=ch_out,
+ ch_out=ch_out,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ norm_type=norm_type,
+ data_format=data_format,
+ name=name))
+ self.routes.append(route)
+
+ def forward(self, blocks, for_mot=False):
+ assert len(blocks) == self.num_blocks
+ blocks = blocks[::-1]
+ yolo_feats = []
+
+ # add embedding features output for multi-object tracking model
+ if for_mot:
+ emb_feats = []
+
+ for i, block in enumerate(blocks):
+ if i == 0 and self.spp_:
+ block = self.spp(block)
+
+ if i > 0:
+ if self.data_format == 'NCHW':
+ block = paddle.concat([route, block], axis=1)
+ else:
+ block = paddle.concat([route, block], axis=-1)
+ route, tip = self.yolo_blocks[i](block)
+ yolo_feats.append(tip)
+
+ if for_mot:
+ # add embedding features output
+ emb_feats.append(route)
+
+ if i < self.num_blocks - 1:
+ route = self.routes[i](route)
+ route = F.interpolate(
+ route, scale_factor=2., data_format=self.data_format)
+
+ if for_mot:
+ return {'yolo_feats': yolo_feats, 'emb_feats': emb_feats}
+ else:
+ return yolo_feats
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape], }
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
+
+
+@register
+@serializable
+class PPYOLOPAN(nn.Layer):
+ __shared__ = ['norm_type', 'data_format']
+
+ def __init__(self,
+ in_channels=[512, 1024, 2048],
+ norm_type='bn',
+ data_format='NCHW',
+ act='mish',
+ conv_block_num=3,
+ drop_block=False,
+ block_size=3,
+ keep_prob=0.9,
+ spp=False):
+ """
+ PPYOLOPAN layer with SPP, DropBlock and CSP connection.
+
+ Args:
+ in_channels (list): input channels for fpn
+ norm_type (str): batch norm type, default bn
+ data_format (str): data format, NCHW or NHWC
+ act (str): activation function, default mish
+ conv_block_num (int): conv block num of each pan block
+ drop_block (bool): whether use DropBlock or not
+ block_size (int): block size of DropBlock
+ keep_prob (float): keep probability of DropBlock
+ spp (bool): whether use spp or not
+
+ """
+ super(PPYOLOPAN, self).__init__()
+        assert len(in_channels) > 0, "in_channels length should be > 0"
+ self.in_channels = in_channels
+ self.num_blocks = len(in_channels)
+ # parse kwargs
+ self.drop_block = drop_block
+ self.block_size = block_size
+ self.keep_prob = keep_prob
+ self.spp = spp
+ self.conv_block_num = conv_block_num
+ self.data_format = data_format
+ if self.drop_block:
+ dropblock_cfg = [[
+ 'dropblock', DropBlock, [self.block_size, self.keep_prob],
+ dict()
+ ]]
+ else:
+ dropblock_cfg = []
+
+ # fpn
+ self.fpn_blocks = []
+ self.fpn_routes = []
+ fpn_channels = []
+ for i, ch_in in enumerate(self.in_channels[::-1]):
+ if i > 0:
+ ch_in += 512 // (2**(i - 1))
+ channel = 512 // (2**i)
+ base_cfg = []
+ for j in range(self.conv_block_num):
+ base_cfg += [
+ # name, layer, args
+ [
+ '{}.0'.format(j), ConvBNLayer, [channel, channel, 1],
+ dict(
+ padding=0, act=act, norm_type=norm_type)
+ ],
+ [
+ '{}.1'.format(j), ConvBNLayer, [channel, channel, 3],
+ dict(
+ padding=1, act=act, norm_type=norm_type)
+ ]
+ ]
+
+ if i == 0 and self.spp:
+ base_cfg[3] = [
+ 'spp', SPP, [channel * 4, channel, 1], dict(
+ pool_size=[5, 9, 13], act=act, norm_type=norm_type)
+ ]
+
+ cfg = base_cfg[:4] + dropblock_cfg + base_cfg[4:]
+ name = 'fpn.{}'.format(i)
+ fpn_block = self.add_sublayer(
+ name,
+ PPYOLODetBlockCSP(cfg, ch_in, channel, act, norm_type, name,
+ data_format))
+ self.fpn_blocks.append(fpn_block)
+ fpn_channels.append(channel * 2)
+ if i < self.num_blocks - 1:
+ name = 'fpn_transition.{}'.format(i)
+ route = self.add_sublayer(
+ name,
+ ConvBNLayer(
+ ch_in=channel * 2,
+ ch_out=channel,
+ filter_size=1,
+ stride=1,
+ padding=0,
+ act=act,
+ norm_type=norm_type,
+ data_format=data_format,
+ name=name))
+ self.fpn_routes.append(route)
+ # pan
+ self.pan_blocks = []
+ self.pan_routes = []
+ self._out_channels = [512 // (2**(self.num_blocks - 2)), ]
+ for i in reversed(range(self.num_blocks - 1)):
+ name = 'pan_transition.{}'.format(i)
+ route = self.add_sublayer(
+ name,
+ ConvBNLayer(
+ ch_in=fpn_channels[i + 1],
+ ch_out=fpn_channels[i + 1],
+ filter_size=3,
+ stride=2,
+ padding=1,
+ act=act,
+ norm_type=norm_type,
+ data_format=data_format,
+ name=name))
+ self.pan_routes = [route, ] + self.pan_routes
+ base_cfg = []
+ ch_in = fpn_channels[i] + fpn_channels[i + 1]
+ channel = 512 // (2**i)
+ for j in range(self.conv_block_num):
+ base_cfg += [
+ # name, layer, args
+ [
+ '{}.0'.format(j), ConvBNLayer, [channel, channel, 1],
+ dict(
+ padding=0, act=act, norm_type=norm_type)
+ ],
+ [
+ '{}.1'.format(j), ConvBNLayer, [channel, channel, 3],
+ dict(
+ padding=1, act=act, norm_type=norm_type)
+ ]
+ ]
+
+ cfg = base_cfg[:4] + dropblock_cfg + base_cfg[4:]
+ name = 'pan.{}'.format(i)
+ pan_block = self.add_sublayer(
+ name,
+ PPYOLODetBlockCSP(cfg, ch_in, channel, act, norm_type, name,
+ data_format))
+
+ self.pan_blocks = [pan_block, ] + self.pan_blocks
+ self._out_channels.append(channel * 2)
+
+ self._out_channels = self._out_channels[::-1]
+
+ def forward(self, blocks, for_mot=False):
+ assert len(blocks) == self.num_blocks
+ blocks = blocks[::-1]
+ fpn_feats = []
+
+ # add embedding features output for multi-object tracking model
+ if for_mot:
+ emb_feats = []
+
+ for i, block in enumerate(blocks):
+ if i > 0:
+ if self.data_format == 'NCHW':
+ block = paddle.concat([route, block], axis=1)
+ else:
+ block = paddle.concat([route, block], axis=-1)
+ route, tip = self.fpn_blocks[i](block)
+ fpn_feats.append(tip)
+
+ if for_mot:
+ # add embedding features output
+ emb_feats.append(route)
+
+ if i < self.num_blocks - 1:
+ route = self.fpn_routes[i](route)
+ route = F.interpolate(
+ route, scale_factor=2., data_format=self.data_format)
+
+ pan_feats = [fpn_feats[-1], ]
+ route = fpn_feats[self.num_blocks - 1]
+ for i in reversed(range(self.num_blocks - 1)):
+ block = fpn_feats[i]
+ route = self.pan_routes[i](route)
+ if self.data_format == 'NCHW':
+ block = paddle.concat([route, block], axis=1)
+ else:
+ block = paddle.concat([route, block], axis=-1)
+
+ route, tip = self.pan_blocks[i](block)
+ pan_feats.append(tip)
+
+ if for_mot:
+ return {'yolo_feats': pan_feats[::-1], 'emb_feats': emb_feats}
+ else:
+ return pan_feats[::-1]
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'in_channels': [i.channels for i in input_shape], }
+
+ @property
+ def out_shape(self):
+ return [ShapeSpec(channels=c) for c in self._out_channels]
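+
+# Channel bookkeeping sketch for PPYOLOPAN (traced from the loops above for
+# the default in_channels=[512, 1024, 2048]):
+#   fpn level i (deepest first) works at 512 // (2 ** i) channels, and each
+#   PPYOLODetBlockCSP emits twice its working channels, so out_shape yields
+#   channels [1024, 512, 256], ordered to match the returned features.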
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/ops.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/ops.py
new file mode 100644
index 000000000..593d8dd37
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/ops.py
@@ -0,0 +1,1601 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn.functional as F
+import paddle.nn as nn
+from paddle import ParamAttr
+from paddle.regularizer import L2Decay
+
+from paddle.fluid.framework import Variable, in_dygraph_mode
+from paddle.fluid import core
+from paddle.fluid.layer_helper import LayerHelper
+from paddle.fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
+
+__all__ = [
+ 'roi_pool',
+ 'roi_align',
+ 'prior_box',
+ 'generate_proposals',
+ 'iou_similarity',
+ 'box_coder',
+ 'yolo_box',
+ 'multiclass_nms',
+ 'distribute_fpn_proposals',
+ 'collect_fpn_proposals',
+ 'matrix_nms',
+ 'batch_norm',
+ 'mish',
+]
+
+
+def mish(x):
+ return x * paddle.tanh(F.softplus(x))
+
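+# Quick dygraph sanity check for mish (illustrative values only):
+#
+#   x = paddle.to_tensor([-1.0, 0.0, 1.0])
+#   y = mish(x)  # elementwise x * tanh(softplus(x)); mish(0) == 0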
+
+def batch_norm(ch,
+ norm_type='bn',
+ norm_decay=0.,
+ freeze_norm=False,
+ initializer=None,
+ data_format='NCHW'):
+ if norm_type == 'sync_bn':
+ batch_norm = nn.SyncBatchNorm
+ else:
+ batch_norm = nn.BatchNorm2D
+
+ norm_lr = 0. if freeze_norm else 1.
+ weight_attr = ParamAttr(
+ initializer=initializer,
+ learning_rate=norm_lr,
+ regularizer=L2Decay(norm_decay),
+ trainable=False if freeze_norm else True)
+ bias_attr = ParamAttr(
+ learning_rate=norm_lr,
+ regularizer=L2Decay(norm_decay),
+ trainable=False if freeze_norm else True)
+
+ norm_layer = batch_norm(
+ ch,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr,
+ data_format=data_format)
+
+ norm_params = norm_layer.parameters()
+ if freeze_norm:
+ for param in norm_params:
+ param.stop_gradient = True
+
+ return norm_layer
+
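+# Usage sketch (assumed 64-channel input): freeze_norm zeroes the learning
+# rate and stops gradients on the affine parameters, so the layer runs in a
+# frozen state:
+#
+#   norm = batch_norm(64, norm_type='bn', norm_decay=0., freeze_norm=True)
+#   y = norm(paddle.rand([2, 64, 16, 16]))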
+
+@paddle.jit.not_to_static
+def roi_pool(input,
+ rois,
+ output_size,
+ spatial_scale=1.0,
+ rois_num=None,
+ name=None):
+ """
+
+ This operator implements the roi_pooling layer.
+    Region of interest pooling (also known as RoI pooling) performs max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).
+
+ The operator has three steps:
+
+ 1. Dividing each region proposal into equal-sized sections with output_size(h, w);
+ 2. Finding the largest value in each section;
+ 3. Copying these max values to the output buffer.
+
+ For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn
+
+ Args:
+ input (Tensor): Input feature, 4D-Tensor with the shape of [N,C,H,W],
+            where N is the batch size, C is the number of input channels, H is the height and W is the width.
+ The data type is float32 or float64.
+ rois (Tensor): ROIs (Regions of Interest) to pool over.
+ 2D-Tensor or 2D-LoDTensor with the shape of [num_rois,4], the lod level is 1.
+ Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates,
+ and (x2, y2) is the bottom right coordinates.
+ output_size (int or tuple[int, int]): The pooled output size(h, w), data type is int32. If int, h and w are both equal to output_size.
+ spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0
+ rois_num (Tensor): The number of RoIs in each image. Default: None
+ name(str, optional): For detailed information, please refer
+            to :ref:`api_guide_Name`. Usually there is no need to set it, and it is
+            None by default.
+
+
+ Returns:
+ Tensor: The pooled feature, 4D-Tensor with the shape of [num_rois, C, output_size[0], output_size[1]].
+
+
+ Examples:
+
+ .. code-block:: python
+
+ import paddle
+ from ppdet.modeling import ops
+ paddle.enable_static()
+
+ x = paddle.static.data(
+ name='data', shape=[None, 256, 32, 32], dtype='float32')
+ rois = paddle.static.data(
+ name='rois', shape=[None, 4], dtype='float32')
+ rois_num = paddle.static.data(name='rois_num', shape=[None], dtype='int32')
+
+ pool_out = ops.roi_pool(
+ input=x,
+ rois=rois,
+ output_size=(1, 1),
+ spatial_scale=1.0,
+ rois_num=rois_num)
+ """
+ check_type(output_size, 'output_size', (int, tuple), 'roi_pool')
+ if isinstance(output_size, int):
+ output_size = (output_size, output_size)
+
+ pooled_height, pooled_width = output_size
+ if in_dygraph_mode():
+ assert rois_num is not None, "rois_num should not be None in dygraph mode."
+ pool_out, argmaxes = core.ops.roi_pool(
+ input, rois, rois_num, "pooled_height", pooled_height,
+ "pooled_width", pooled_width, "spatial_scale", spatial_scale)
+ return pool_out, argmaxes
+
+ else:
+ check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool')
+ check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool')
+ helper = LayerHelper('roi_pool', **locals())
+ dtype = helper.input_dtype()
+ pool_out = helper.create_variable_for_type_inference(dtype)
+ argmaxes = helper.create_variable_for_type_inference(dtype='int32')
+
+ inputs = {
+ "X": input,
+ "ROIs": rois,
+ }
+ if rois_num is not None:
+ inputs['RoisNum'] = rois_num
+ helper.append_op(
+ type="roi_pool",
+ inputs=inputs,
+ outputs={"Out": pool_out,
+ "Argmax": argmaxes},
+ attrs={
+ "pooled_height": pooled_height,
+ "pooled_width": pooled_width,
+ "spatial_scale": spatial_scale
+ })
+ return pool_out, argmaxes
+
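+# Dygraph usage sketch (illustrative shapes; on this path rois_num is
+# mandatory and the op also returns the argmax indices):
+#
+#   feat = paddle.rand([1, 256, 32, 32])
+#   rois = paddle.to_tensor([[4., 4., 16., 16.]])
+#   rois_num = paddle.to_tensor([1], dtype='int32')
+#   pool_out, argmaxes = roi_pool(feat, rois, output_size=7, rois_num=rois_num)
+#   # pool_out.shape == [1, 256, 7, 7]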
+
+@paddle.jit.not_to_static
+def roi_align(input,
+ rois,
+ output_size,
+ spatial_scale=1.0,
+ sampling_ratio=-1,
+ rois_num=None,
+ aligned=True,
+ name=None):
+ """
+
+    Region of interest align (also known as RoI align) performs
+    bilinear interpolation on inputs of nonuniform sizes to obtain
+    fixed-size feature maps (e.g. 7*7).
+
+    Each region proposal is divided into equal-sized sections of
+    pooled_width x pooled_height, and the sampling locations are kept at
+    their exact (non-quantized) positions.
+
+    In each RoI bin, the values at four regularly sampled locations
+    are computed directly through bilinear interpolation, and the output is
+    the mean of the four values. This avoids the misalignment problem of
+    RoI pooling.
+
+ Args:
+ input (Tensor): Input feature, 4D-Tensor with the shape of [N,C,H,W],
+            where N is the batch size, C is the number of input channels, H is the height and W is the width.
+ The data type is float32 or float64.
+ rois (Tensor): ROIs (Regions of Interest) to pool over.It should be
+ a 2-D Tensor or 2-D LoDTensor of shape (num_rois, 4), the lod level is 1.
+ The data type is float32 or float64. Given as [[x1, y1, x2, y2], ...],
+ (x1, y1) is the top left coordinates, and (x2, y2) is the bottom right coordinates.
+ output_size (int or tuple[int, int]): The pooled output size(h, w), data type is int32. If int, h and w are both equal to output_size.
+ spatial_scale (float32, optional): Multiplicative spatial scale factor to translate ROI coords
+ from their input scale to the scale used when pooling. Default: 1.0
+ sampling_ratio(int32, optional): number of sampling points in the interpolation grid.
+ If <=0, then grid points are adaptive to roi_width and pooled_w, likewise for height. Default: -1
+ rois_num (Tensor): The number of RoIs in each image. Default: None
+ name(str, optional): For detailed information, please refer
+            to :ref:`api_guide_Name`. Usually there is no need to set it, and it is
+            None by default.
+
+ Returns:
+ Tensor:
+
+ Output: The output of ROIAlignOp is a 4-D tensor with shape (num_rois, channels, pooled_h, pooled_w). The data type is float32 or float64.
+
+
+ Examples:
+ .. code-block:: python
+
+ import paddle
+ from ppdet.modeling import ops
+ paddle.enable_static()
+
+ x = paddle.static.data(
+ name='data', shape=[None, 256, 32, 32], dtype='float32')
+ rois = paddle.static.data(
+ name='rois', shape=[None, 4], dtype='float32')
+ rois_num = paddle.static.data(name='rois_num', shape=[None], dtype='int32')
+ align_out = ops.roi_align(input=x,
+ rois=rois,
+                                  output_size=(7, 7),
+ spatial_scale=0.5,
+ sampling_ratio=-1,
+ rois_num=rois_num)
+ """
+ check_type(output_size, 'output_size', (int, tuple), 'roi_align')
+ if isinstance(output_size, int):
+ output_size = (output_size, output_size)
+
+ pooled_height, pooled_width = output_size
+
+ if in_dygraph_mode():
+ assert rois_num is not None, "rois_num should not be None in dygraph mode."
+ align_out = core.ops.roi_align(
+ input, rois, rois_num, "pooled_height", pooled_height,
+ "pooled_width", pooled_width, "spatial_scale", spatial_scale,
+ "sampling_ratio", sampling_ratio, "aligned", aligned)
+ return align_out
+
+ else:
+ check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+ 'roi_align')
+ check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
+ 'roi_align')
+ helper = LayerHelper('roi_align', **locals())
+ dtype = helper.input_dtype()
+ align_out = helper.create_variable_for_type_inference(dtype)
+ inputs = {
+ "X": input,
+ "ROIs": rois,
+ }
+ if rois_num is not None:
+ inputs['RoisNum'] = rois_num
+ helper.append_op(
+ type="roi_align",
+ inputs=inputs,
+ outputs={"Out": align_out},
+ attrs={
+ "pooled_height": pooled_height,
+ "pooled_width": pooled_width,
+ "spatial_scale": spatial_scale,
+ "sampling_ratio": sampling_ratio,
+ "aligned": aligned,
+ })
+ return align_out
+
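+# Dygraph usage sketch (illustrative; mirrors the static example above but
+# returns a single tensor):
+#
+#   feat = paddle.rand([1, 256, 32, 32])
+#   rois = paddle.to_tensor([[4., 4., 16., 16.]])
+#   rois_num = paddle.to_tensor([1], dtype='int32')
+#   out = roi_align(feat, rois, output_size=7, rois_num=rois_num)
+#   # out.shape == [1, 256, 7, 7]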
+
+@paddle.jit.not_to_static
+def iou_similarity(x, y, box_normalized=True, name=None):
+ """
+ Computes intersection-over-union (IOU) between two box lists.
+    Box list 'X' should be a LoDTensor and 'Y' a common Tensor;
+    boxes in 'Y' are shared by all instances of the batched inputs of X.
+ Given two boxes A and B, the calculation of IOU is as follows:
+
+ $$
+ IOU(A, B) =
+ \\frac{area(A\\cap B)}{area(A)+area(B)-area(A\\cap B)}
+ $$
+
+ Args:
+ x (Tensor): Box list X is a 2-D Tensor with shape [N, 4] holds N
+ boxes, each box is represented as [xmin, ymin, xmax, ymax],
+ the shape of X is [N, 4]. [xmin, ymin] is the left top
+ coordinate of the box if the input is image feature map, they
+ are close to the origin of the coordinate system.
+ [xmax, ymax] is the right bottom coordinate of the box.
+ The data type is float32 or float64.
+        y (Tensor): Box list Y holds M boxes, each box is represented as
+            [xmin, ymin, xmax, ymax], the shape of Y is [M, 4].
+ [xmin, ymin] is the left top coordinate of the box if the
+ input is image feature map, and [xmax, ymax] is the right
+ bottom coordinate of the box. The data type is float32 or float64.
+ box_normalized(bool): Whether treat the priorbox as a normalized box.
+ Set true by default.
+ name(str, optional): For detailed information, please refer
+            to :ref:`api_guide_Name`. Usually there is no need to set it, and it is
+            None by default.
+
+ Returns:
+ Tensor: The output of iou_similarity op, a tensor with shape [N, M]
+ representing pairwise iou scores. The data type is same with x.
+
+ Examples:
+ .. code-block:: python
+
+ import paddle
+ from ppdet.modeling import ops
+ paddle.enable_static()
+
+ x = paddle.static.data(name='x', shape=[None, 4], dtype='float32')
+ y = paddle.static.data(name='y', shape=[None, 4], dtype='float32')
+ iou = ops.iou_similarity(x=x, y=y)
+ """
+
+ if in_dygraph_mode():
+ out = core.ops.iou_similarity(x, y, 'box_normalized', box_normalized)
+ return out
+ else:
+ helper = LayerHelper("iou_similarity", **locals())
+ out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+ helper.append_op(
+ type="iou_similarity",
+ inputs={"X": x,
+ "Y": y},
+ attrs={"box_normalized": box_normalized},
+ outputs={"Out": out})
+ return out
+
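+# Dygraph sanity check: the IoU of a box with itself is 1 (arbitrary
+# coordinates):
+#
+#   a = paddle.to_tensor([[0., 0., 10., 10.]])
+#   iou_similarity(a, a, box_normalized=False)  # -> [[1.]]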
+
+@paddle.jit.not_to_static
+def collect_fpn_proposals(multi_rois,
+ multi_scores,
+ min_level,
+ max_level,
+ post_nms_top_n,
+ rois_num_per_level=None,
+ name=None):
+ """
+
+ **This OP only supports LoDTensor as input**. Concat multi-level RoIs
+ (Region of Interest) and select N RoIs with respect to multi_scores.
+ This operation performs the following steps:
+
+ 1. Choose num_level RoIs and scores as input: num_level = max_level - min_level
+ 2. Concat multi-level RoIs and scores
+ 3. Sort scores and select post_nms_top_n scores
+ 4. Gather RoIs by selected indices from scores
+ 5. Re-sort RoIs by corresponding batch_id
+
+ Args:
+ multi_rois(list): List of RoIs to collect. Element in list is 2-D
+ LoDTensor with shape [N, 4] and data type is float32 or float64,
+ N is the number of RoIs.
+ multi_scores(list): List of scores of RoIs to collect. Element in list
+ is 2-D LoDTensor with shape [N, 1] and data type is float32 or
+ float64, N is the number of RoIs.
+ min_level(int): The lowest level of FPN layer to collect
+ max_level(int): The highest level of FPN layer to collect
+ post_nms_top_n(int): The number of selected RoIs
+ rois_num_per_level(list, optional): The List of RoIs' numbers.
+ Each element is 1-D Tensor which contains the RoIs' number of each
+ image on each level and the shape is [B] and data type is
+ int32, B is the number of images. If it is not None then return
+ a 1-D Tensor contains the output RoIs' number of each image and
+ the shape is [B]. Default: None
+ name(str, optional): For detailed information, please refer
+            to :ref:`api_guide_Name`. Usually there is no need to set it, and it is
+            None by default.
+
+ Returns:
+ Variable:
+
+ fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
+ float32 or float64. Selected RoIs.
+
+        rois_num(Tensor): 1-D Tensor contains the number of RoIs of each
+ image. The shape is [B] and data type is int32. B is the number of
+ images.
+
+ Examples:
+ .. code-block:: python
+
+ import paddle
+ from ppdet.modeling import ops
+ paddle.enable_static()
+ multi_rois = []
+ multi_scores = []
+ for i in range(4):
+ multi_rois.append(paddle.static.data(
+ name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
+ for i in range(4):
+ multi_scores.append(paddle.static.data(
+ name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
+
+ fpn_rois = ops.collect_fpn_proposals(
+ multi_rois=multi_rois,
+ multi_scores=multi_scores,
+ min_level=2,
+ max_level=5,
+ post_nms_top_n=2000)
+ """
+ check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
+ check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
+ num_lvl = max_level - min_level + 1
+ input_rois = multi_rois[:num_lvl]
+ input_scores = multi_scores[:num_lvl]
+
+ if in_dygraph_mode():
+ assert rois_num_per_level is not None, "rois_num_per_level should not be None in dygraph mode."
+ attrs = ('post_nms_topN', post_nms_top_n)
+ output_rois, rois_num = core.ops.collect_fpn_proposals(
+ input_rois, input_scores, rois_num_per_level, *attrs)
+ return output_rois, rois_num
+
+ else:
+ helper = LayerHelper('collect_fpn_proposals', **locals())
+ dtype = helper.input_dtype('multi_rois')
+ check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
+ 'collect_fpn_proposals')
+ output_rois = helper.create_variable_for_type_inference(dtype)
+ output_rois.stop_gradient = True
+
+ inputs = {
+ 'MultiLevelRois': input_rois,
+ 'MultiLevelScores': input_scores,
+ }
+ outputs = {'FpnRois': output_rois}
+        if rois_num_per_level is not None:
+            inputs['MultiLevelRoIsNum'] = rois_num_per_level
+            rois_num = helper.create_variable_for_type_inference(dtype='int32')
+            rois_num.stop_gradient = True
+            outputs['RoisNum'] = rois_num
+        else:
+            # ensure rois_num is defined for the return below
+            rois_num = None
+ helper.append_op(
+ type='collect_fpn_proposals',
+ inputs=inputs,
+ outputs=outputs,
+ attrs={'post_nms_topN': post_nms_top_n})
+ return output_rois, rois_num
+
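+# Dygraph call sketch (rois_num_per_level is required on this path; tensors
+# are assumed to be prepared as described above):
+#
+#   output_rois, rois_num = collect_fpn_proposals(
+#       multi_rois, multi_scores, min_level=2, max_level=5,
+#       post_nms_top_n=2000, rois_num_per_level=rois_num_per_level)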
+
+@paddle.jit.not_to_static
+def distribute_fpn_proposals(fpn_rois,
+ min_level,
+ max_level,
+ refer_level,
+ refer_scale,
+ pixel_offset=False,
+ rois_num=None,
+ name=None):
+ """
+
+    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
+    (FPN) models, all proposals need to be distributed to different FPN
+    levels according to the scale of the proposals, the referring scale and
+    the referring level. Besides, to restore the order of proposals, we return
+    an array which indicates the original indices of the rois in the current
+    proposals. To compute the FPN level for each roi, the formula is given as follows:
+
+ .. math::
+
+        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
+
+        level &= floor(\log_{2}(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
+
+ where BBoxArea is a function to compute the area of each roi.
+
+ Args:
+
+ fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
+ float32 or float64. The input fpn_rois.
+ min_level(int32): The lowest level of FPN layer where the proposals come
+ from.
+ max_level(int32): The highest level of FPN layer where the proposals
+ come from.
+ refer_level(int32): The referring level of FPN layer with specified scale.
+ refer_scale(int32): The referring scale of FPN layer with specified level.
+ rois_num(Tensor): 1-D Tensor contains the number of RoIs in each image.
+ The shape is [B] and data type is int32. B is the number of images.
+ If it is not None then return a list of 1-D Tensor. Each element
+ is the output RoIs' number of each image on the corresponding level
+ and the shape is [B]. None by default.
+ name(str, optional): For detailed information, please refer
+            to :ref:`api_guide_Name`. Usually there is no need to set it, and it is
+            None by default.
+
+ Returns:
+ Tuple:
+
+ multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
+            and data type of float32 or float64. The length is
+ max_level-min_level+1. The proposals in each FPN level.
+
+ restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
+ the number of total rois. The data type is int32. It is
+ used to restore the order of fpn_rois.
+
+ rois_num_per_level(List): A list of 1-D Tensor and each Tensor is
+ the RoIs' number in each image on the corresponding level. The shape
+ is [B] and data type of int32. B is the number of images
+
+
+ Examples:
+ .. code-block:: python
+
+ import paddle
+ from ppdet.modeling import ops
+ paddle.enable_static()
+ fpn_rois = paddle.static.data(
+ name='data', shape=[None, 4], dtype='float32', lod_level=1)
+ multi_rois, restore_ind = ops.distribute_fpn_proposals(
+ fpn_rois=fpn_rois,
+ min_level=2,
+ max_level=5,
+ refer_level=4,
+ refer_scale=224)
+ """
+ num_lvl = max_level - min_level + 1
+
+ if in_dygraph_mode():
+ assert rois_num is not None, "rois_num should not be None in dygraph mode."
+ attrs = ('min_level', min_level, 'max_level', max_level, 'refer_level',
+ refer_level, 'refer_scale', refer_scale, 'pixel_offset',
+ pixel_offset)
+ multi_rois, restore_ind, rois_num_per_level = core.ops.distribute_fpn_proposals(
+ fpn_rois, rois_num, num_lvl, num_lvl, *attrs)
+ return multi_rois, restore_ind, rois_num_per_level
+
+ else:
+ check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
+ 'distribute_fpn_proposals')
+ helper = LayerHelper('distribute_fpn_proposals', **locals())
+ dtype = helper.input_dtype('fpn_rois')
+ multi_rois = [
+ helper.create_variable_for_type_inference(dtype)
+ for i in range(num_lvl)
+ ]
+
+ restore_ind = helper.create_variable_for_type_inference(dtype='int32')
+
+ inputs = {'FpnRois': fpn_rois}
+ outputs = {
+ 'MultiFpnRois': multi_rois,
+ 'RestoreIndex': restore_ind,
+ }
+
+        if rois_num is not None:
+            inputs['RoisNum'] = rois_num
+            rois_num_per_level = [
+                helper.create_variable_for_type_inference(dtype='int32')
+                for i in range(num_lvl)
+            ]
+            outputs['MultiLevelRoIsNum'] = rois_num_per_level
+        else:
+            # ensure rois_num_per_level is defined for the return below
+            rois_num_per_level = None
+
+ helper.append_op(
+ type='distribute_fpn_proposals',
+ inputs=inputs,
+ outputs=outputs,
+ attrs={
+ 'min_level': min_level,
+ 'max_level': max_level,
+ 'refer_level': refer_level,
+ 'refer_scale': refer_scale,
+ 'pixel_offset': pixel_offset
+ })
+ return multi_rois, restore_ind, rois_num_per_level
+
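+# Worked level assignment (taking the log base 2, as in the FPN paper, with
+# refer_level=4 and refer_scale=224):
+#   a 112 x 112 roi has roi_scale = sqrt(112 * 112) = 112, so
+#   level = floor(log2(112 / 224) + 4) = floor(-1 + 4) = 3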
+
+@paddle.jit.not_to_static
+def yolo_box(
+ x,
+ origin_shape,
+ anchors,
+ class_num,
+ conf_thresh,
+ downsample_ratio,
+ clip_bbox=True,
+ scale_x_y=1.,
+ name=None, ):
+ """
+
+ This operator generates YOLO detection boxes from output of YOLOv3 network.
+
+    The output of the previous network is in shape [N, C, H, W], where H and W
+    should be the same and specify the grid size. Each grid point predicts a
+    fixed number of boxes, denoted S below, which equals the number of anchors.
+    In the second (channel) dimension, C should be equal to S * (5 + class_num),
+    where class_num is the number of object categories in the source dataset
+    (e.g. 80 in the COCO dataset); so besides the 4 box location coordinates
+    x, y, w, h, the channel dimension also includes the confidence score of the
+    box and the class one-hot key of each anchor box.
+ Assume the 4 location coordinates are :math:`t_x, t_y, t_w, t_h`, the box
+ predictions should be as follows:
+ $$
+ b_x = \\sigma(t_x) + c_x
+ $$
+ $$
+ b_y = \\sigma(t_y) + c_y
+ $$
+ $$
+ b_w = p_w e^{t_w}
+ $$
+ $$
+ b_h = p_h e^{t_h}
+ $$
+    In the equations above, :math:`c_x, c_y` is the top-left corner of the
+    current grid and :math:`p_w, p_h` is specified by the anchors.
+    The logistic regression value of the 5th channel of each anchor's
+    predictions represents the confidence score of each prediction box, and
+    the logistic regression values of the last :attr:`class_num` channels of
+    each anchor's predictions represent the classification scores. Boxes with
+    confidence scores below :attr:`conf_thresh` should be ignored, and the
+    final box score is the product of the confidence score and the
+    classification score.
+ $$
+ score_{pred} = score_{conf} * score_{class}
+ $$
+
+ Args:
+ x (Tensor): The input tensor of YoloBox operator is a 4-D tensor with shape of [N, C, H, W].
+ The second dimension(C) stores box locations, confidence score and
+ classification one-hot keys of each anchor box. Generally, X should be the output of YOLOv3 network.
+ The data type is float32 or float64.
+ origin_shape (Tensor): The image size tensor of YoloBox operator, This is a 2-D tensor with shape of [N, 2].
+ This tensor holds height and width of each input image used for resizing output box in input image
+ scale. The data type is int32.
+ anchors (list|tuple): The anchor width and height, it will be parsed pair by pair.
+ class_num (int): The number of classes to predict.
+ conf_thresh (float): The confidence scores threshold of detection boxes. Boxes with confidence scores
+ under threshold should be ignored.
+        downsample_ratio (int): The downsample ratio from the network input to the YoloBox operator input,
+            so 32, 16, 8 should be set for the first, second, and third YoloBox operators.
+        clip_bbox (bool): Whether to clip the output bounding box within the Input(ImgSize) boundary. Default true.
+ scale_x_y (float): Scale the center point of decoded bounding box. Default 1.0.
+ name (string): The default value is None. Normally there is no need
+ for user to set this property. For more information,
+ please refer to :ref:`api_guide_Name`
+
+ Returns:
+ boxes Tensor: A 3-D tensor with shape [N, M, 4], the coordinates of boxes, N is the batch num,
+ M is output box number, and the 3rd dimension stores [xmin, ymin, xmax, ymax] coordinates of boxes.
+        scores Tensor: A 3-D tensor with shape [N, M, :attr:`class_num`], the classification scores of boxes, N is the batch num,
+            M is the output box number.
+
+ Raises:
+ TypeError: Attr anchors of yolo box must be list or tuple
+ TypeError: Attr class_num of yolo box must be an integer
+ TypeError: Attr conf_thresh of yolo box must be a float number
+
+ Examples:
+
+ .. code-block:: python
+
+ import paddle
+ from ppdet.modeling import ops
+
+ paddle.enable_static()
+ x = paddle.static.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
+ img_size = paddle.static.data(name='img_size',shape=[None, 2],dtype='int64')
+ anchors = [10, 13, 16, 30, 33, 23]
+            boxes, scores = ops.yolo_box(x=x, origin_shape=img_size, class_num=80, anchors=anchors,
+                                         conf_thresh=0.01, downsample_ratio=32)
+ """
+ helper = LayerHelper('yolo_box', **locals())
+
+ if not isinstance(anchors, list) and not isinstance(anchors, tuple):
+ raise TypeError("Attr anchors of yolo_box must be list or tuple")
+ if not isinstance(class_num, int):
+ raise TypeError("Attr class_num of yolo_box must be an integer")
+ if not isinstance(conf_thresh, float):
+        raise TypeError("Attr conf_thresh of yolo_box must be a float number")
+
+ if in_dygraph_mode():
+ attrs = ('anchors', anchors, 'class_num', class_num, 'conf_thresh',
+ conf_thresh, 'downsample_ratio', downsample_ratio, 'clip_bbox',
+ clip_bbox, 'scale_x_y', scale_x_y)
+ boxes, scores = core.ops.yolo_box(x, origin_shape, *attrs)
+ return boxes, scores
+ else:
+ boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
+ scores = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+ attrs = {
+ "anchors": anchors,
+ "class_num": class_num,
+ "conf_thresh": conf_thresh,
+ "downsample_ratio": downsample_ratio,
+ "clip_bbox": clip_bbox,
+ "scale_x_y": scale_x_y,
+ }
+
+ helper.append_op(
+ type='yolo_box',
+ inputs={
+ "X": x,
+ "ImgSize": origin_shape,
+ },
+ outputs={
+ 'Boxes': boxes,
+ 'Scores': scores,
+ },
+ attrs=attrs)
+ return boxes, scores
+
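+# Channel sanity check (follows the equations in the docstring): with the
+# 3 anchor pairs [10, 13, 16, 30, 33, 23] and class_num=80, the channel
+# dimension must be S * (5 + class_num) = 3 * 85 = 255, which is why the
+# static example above declares x with shape [None, 255, 13, 13].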
+
+@paddle.jit.not_to_static
+def prior_box(input,
+ image,
+ min_sizes,
+ max_sizes=None,
+ aspect_ratios=[1.],
+ variance=[0.1, 0.1, 0.2, 0.2],
+ flip=False,
+ clip=False,
+ steps=[0.0, 0.0],
+ offset=0.5,
+ min_max_aspect_ratios_order=False,
+ name=None):
+ """
+
+ This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
+    Each position of the input produces N prior boxes, where N is determined by
+    the count of min_sizes, max_sizes and aspect_ratios. The size of each
+    box lies in the (min_size, max_size) interval, and the boxes are generated
+    in sequence according to the aspect_ratios.
+
+ Parameters:
+ input(Tensor): 4-D tensor(NCHW), the data type should be float32 or float64.
+ image(Tensor): 4-D tensor(NCHW), the input image data of PriorBoxOp,
+ the data type should be float32 or float64.
+ min_sizes(list|tuple|float): the min sizes of generated prior boxes.
+ max_sizes(list|tuple|None): the max sizes of generated prior boxes.
+ Default: None.
+ aspect_ratios(list|tuple|float): the aspect ratios of generated
+ prior boxes. Default: [1.].
+ variance(list|tuple): the variances to be encoded in prior boxes.
+ Default:[0.1, 0.1, 0.2, 0.2].
+ flip(bool): Whether to flip aspect ratios. Default:False.
+ clip(bool): Whether to clip out-of-boundary boxes. Default: False.
+        steps(list|tuple): Prior box steps across width and height. If
+            steps[0] equals 0.0 or steps[1] equals 0.0, the prior box steps across
+            the width or height of the input will be calculated automatically.
+            Default: [0., 0.]
+ offset(float): Prior boxes center offset. Default: 0.5
+ min_max_aspect_ratios_order(bool): If set True, the output prior box is
+ in order of [min, max, aspect_ratios], which is consistent with
+            Caffe. Please note, this order affects the weight order of the
+            convolution layer that follows, but does not affect the final
+            detection results. Default: False.
+ name(str, optional): The default value is None. Normally there is no need for
+ user to set this property. For more information, please refer to :ref:`api_guide_Name`
+
+ Returns:
+ Tuple: A tuple with two Variable (boxes, variances)
+
+ boxes(Tensor): the output prior boxes of PriorBox.
+ 4-D tensor, the layout is [H, W, num_priors, 4].
+ H is the height of input, W is the width of input,
+ num_priors is the total box count of each position of input.
+
+ variances(Tensor): the expanded variances of PriorBox.
+                4-D tensor, the layout is [H, W, num_priors, 4].
+                H is the height of input, W is the width of input,
+                num_priors is the total box count of each position of input.
+
+ Examples:
+ .. code-block:: python
+
+ import paddle
+ from ppdet.modeling import ops
+
+ paddle.enable_static()
+ input = paddle.static.data(name="input", shape=[None,3,6,9])
+ image = paddle.static.data(name="image", shape=[None,3,9,12])
+ box, var = ops.prior_box(
+ input=input,
+ image=image,
+ min_sizes=[100.],
+ clip=True,
+ flip=True)
+ """
+ helper = LayerHelper("prior_box", **locals())
+ dtype = helper.input_dtype()
+ check_variable_and_dtype(
+ input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
+
+ def _is_list_or_tuple_(data):
+ return (isinstance(data, list) or isinstance(data, tuple))
+
+ if not _is_list_or_tuple_(min_sizes):
+ min_sizes = [min_sizes]
+ if not _is_list_or_tuple_(aspect_ratios):
+ aspect_ratios = [aspect_ratios]
+ if not (_is_list_or_tuple_(steps) and len(steps) == 2):
+ raise ValueError('steps should be a list or tuple ',
+ 'with length 2, (step_width, step_height).')
+
+ min_sizes = list(map(float, min_sizes))
+ aspect_ratios = list(map(float, aspect_ratios))
+ steps = list(map(float, steps))
+
+ cur_max_sizes = None
+ if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
+ if not _is_list_or_tuple_(max_sizes):
+ max_sizes = [max_sizes]
+ cur_max_sizes = max_sizes
+
+ if in_dygraph_mode():
+ attrs = ('min_sizes', min_sizes, 'aspect_ratios', aspect_ratios,
+ 'variances', variance, 'flip', flip, 'clip', clip, 'step_w',
+ steps[0], 'step_h', steps[1], 'offset', offset,
+ 'min_max_aspect_ratios_order', min_max_aspect_ratios_order)
+ if cur_max_sizes is not None:
+ attrs += ('max_sizes', cur_max_sizes)
+ box, var = core.ops.prior_box(input, image, *attrs)
+ return box, var
+ else:
+ attrs = {
+ 'min_sizes': min_sizes,
+ 'aspect_ratios': aspect_ratios,
+ 'variances': variance,
+ 'flip': flip,
+ 'clip': clip,
+ 'step_w': steps[0],
+ 'step_h': steps[1],
+ 'offset': offset,
+ 'min_max_aspect_ratios_order': min_max_aspect_ratios_order
+ }
+
+ if cur_max_sizes is not None:
+ attrs['max_sizes'] = cur_max_sizes
+
+ box = helper.create_variable_for_type_inference(dtype)
+ var = helper.create_variable_for_type_inference(dtype)
+ helper.append_op(
+ type="prior_box",
+ inputs={"Input": input,
+ "Image": image},
+ outputs={"Boxes": box,
+ "Variances": var},
+ attrs=attrs, )
+ box.stop_gradient = True
+ var.stop_gradient = True
+ return box, var
+
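+# Dygraph call sketch (toy tensors; shapes follow the docstring layout):
+#
+#   feat = paddle.rand([1, 3, 6, 9])
+#   img = paddle.rand([1, 3, 9, 12])
+#   box, var = prior_box(feat, img, min_sizes=[100.], clip=True, flip=True)
+#   # box and var have layout [H, W, num_priors, 4] == [6, 9, num_priors, 4]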
+
+@paddle.jit.not_to_static
+def multiclass_nms(bboxes,
+ scores,
+ score_threshold,
+ nms_top_k,
+ keep_top_k,
+ nms_threshold=0.3,
+ normalized=True,
+ nms_eta=1.,
+ background_label=-1,
+ return_index=False,
+ return_rois_num=True,
+ rois_num=None,
+ name=None):
+ """
+    This operator performs multi-class non maximum suppression (NMS) on
+    boxes and scores.
+    In the NMS step, this operator greedily selects a subset of detection
+    bounding boxes whose scores are larger than score_threshold (if the
+    threshold is provided), then keeps the nms_top_k highest-scoring boxes
+    if nms_top_k is larger than -1. Then this operator prunes away boxes that
+    have a high IOU (intersection over union) overlap with already selected
+    boxes, using adaptive-threshold NMS based on nms_threshold and nms_eta.
+    After the NMS step, at most keep_top_k bboxes in total are kept
+    per image if keep_top_k is larger than -1.
+ Args:
+ bboxes (Tensor): Two types of bboxes are supported:
+ 1. (Tensor) A 3-D Tensor with shape
+ [N, M, 4 or 8 16 24 32] represents the
+ predicted locations of M bounding bboxes,
+ N is the batch size. Each bounding box has four
+ coordinate values and the layout is
+ [xmin, ymin, xmax, ymax], when box size equals to 4.
+ 2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
+ M is the number of bounding boxes, C is the
+ class number
+ scores (Tensor): Two types of scores are supported:
+ 1. (Tensor) A 3-D Tensor with shape [N, C, M]
+            represents the predicted confidences.
+            N is the batch size, C is the class number, M is the
+            number of bounding boxes. For each category there
+            are M scores corresponding to the M bounding
+            boxes. Please note, M is equal to the 2nd dimension
+ of BBoxes.
+ 2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
+ M is the number of bbox, C is the class number.
+ In this case, input BBoxes should be the second
+ case with shape [M, C, 4].
+ background_label (int): The index of background label, the background
+ label will be ignored. If set to -1, then all
+            categories will be considered. Default: -1
+ score_threshold (float): Threshold to filter out bounding boxes with
+ low confidence score. If not provided,
+ consider all boxes.
+ nms_top_k (int): Maximum number of detections to be kept according to
+ the confidences after the filtering detections based
+ on score_threshold.
+ nms_threshold (float): The threshold to be used in NMS. Default: 0.3
+        nms_eta (float): The parameter for adaptive-threshold NMS. Default: 1.0
+ keep_top_k (int): Number of total bboxes to be kept per image after NMS
+ step. -1 means keeping all bboxes after NMS step.
+ normalized (bool): Whether detections are normalized. Default: True
+ return_index(bool): Whether return selected index. Default: False
+ rois_num(Tensor): 1-D Tensor contains the number of RoIs in each image.
+ The shape is [B] and data type is int32. B is the number of images.
+ If it is not None then return a list of 1-D Tensor. Each element
+ is the output RoIs' number of each image on the corresponding level
+ and the shape is [B]. None by default.
+ name(str): Name of the multiclass nms op. Default: None.
+ Returns:
+        A tuple (Out, NmsRoisNum, Index): Index is None if return_index is
+        False, and NmsRoisNum is None if return_rois_num is False.
+ Out: A 2-D LoDTensor with shape [No, 6] represents the detections.
+ Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
+ or A 2-D LoDTensor with shape [No, 10] represents the detections.
+ Each row has 10 values: [label, confidence, x1, y1, x2, y2, x3, y3,
+ x4, y4]. No is the total number of detections.
+        If no results are detected in any image, all elements in the LoD are
+        set to 0, and the output tensor is empty (None).
+        Index: Only returned when return_index is True. A 2-D LoDTensor with
+        shape [No, 1] represents the selected indices, whose type is integer.
+        The indices are absolute values across batches. No is the same number
+        as in Out. If the index is used to gather other attributes such as age,
+        one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
+        N is the batch size and M is the number of boxes.
+ Examples:
+ .. code-block:: python
+
+ import paddle
+            from ppdet.modeling import ops
+            paddle.enable_static()
+ boxes = paddle.static.data(name='bboxes', shape=[81, 4],
+ dtype='float32', lod_level=1)
+ scores = paddle.static.data(name='scores', shape=[81],
+ dtype='float32', lod_level=1)
+ out, index = ops.multiclass_nms(bboxes=boxes,
+ scores=scores,
+ background_label=0,
+ score_threshold=0.5,
+ nms_top_k=400,
+ nms_threshold=0.3,
+ keep_top_k=200,
+ normalized=False,
+ return_index=True)
+ """
+ helper = LayerHelper('multiclass_nms3', **locals())
+
+ if in_dygraph_mode():
+ attrs = ('background_label', background_label, 'score_threshold',
+ score_threshold, 'nms_top_k', nms_top_k, 'nms_threshold',
+ nms_threshold, 'keep_top_k', keep_top_k, 'nms_eta', nms_eta,
+ 'normalized', normalized)
+ output, index, nms_rois_num = core.ops.multiclass_nms3(bboxes, scores,
+ rois_num, *attrs)
+ if not return_index:
+ index = None
+ return output, nms_rois_num, index
+
+ else:
+ output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
+ index = helper.create_variable_for_type_inference(dtype='int32')
+
+ inputs = {'BBoxes': bboxes, 'Scores': scores}
+ outputs = {'Out': output, 'Index': index}
+
+ if rois_num is not None:
+ inputs['RoisNum'] = rois_num
+
+ if return_rois_num:
+ nms_rois_num = helper.create_variable_for_type_inference(
+ dtype='int32')
+ outputs['NmsRoisNum'] = nms_rois_num
+
+ helper.append_op(
+ type="multiclass_nms3",
+ inputs=inputs,
+ attrs={
+ 'background_label': background_label,
+ 'score_threshold': score_threshold,
+ 'nms_top_k': nms_top_k,
+ 'nms_threshold': nms_threshold,
+ 'keep_top_k': keep_top_k,
+ 'nms_eta': nms_eta,
+ 'normalized': normalized
+ },
+ outputs=outputs)
+ output.stop_gradient = True
+ index.stop_gradient = True
+ if not return_index:
+ index = None
+ if not return_rois_num:
+ nms_rois_num = None
+
+ return output, nms_rois_num, index
+
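+# Dygraph call sketch (bboxes/scores are assumed tensors of shape [N, M, 4]
+# and [N, C, M]; the op returns (out, nms_rois_num, index), with index None
+# unless return_index=True):
+#
+#   out, nms_rois_num, index = multiclass_nms(
+#       bboxes, scores, score_threshold=0.5, nms_top_k=400,
+#       keep_top_k=200, return_index=True)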
+
+@paddle.jit.not_to_static
+def matrix_nms(bboxes,
+ scores,
+ score_threshold,
+ post_threshold,
+ nms_top_k,
+ keep_top_k,
+ use_gaussian=False,
+ gaussian_sigma=2.,
+ background_label=0,
+ normalized=True,
+ return_index=False,
+ return_rois_num=True,
+ name=None):
+ """
+ **Matrix NMS**
+ This operator does matrix non maximum suppression (NMS).
+    First selects a subset of candidate bounding boxes that have higher scores
+    than score_threshold (if provided), then the top k candidates are kept if
+    nms_top_k is larger than -1. Scores of the remaining candidates are then
+    decayed according to the Matrix NMS scheme.
+    After the NMS step, at most keep_top_k bboxes in total are kept
+    per image if keep_top_k is larger than -1.
+ Args:
+ bboxes (Tensor): A 3-D Tensor with shape [N, M, 4] represents the
+ predicted locations of M bounding bboxes,
+ N is the batch size. Each bounding box has four
+ coordinate values and the layout is
+ [xmin, ymin, xmax, ymax], when box size equals to 4.
+ The data type is float32 or float64.
+ scores (Tensor): A 3-D Tensor with shape [N, C, M]
+            represents the predicted confidences.
+            N is the batch size, C is the class number, M is the
+            number of bounding boxes. For each category there
+            are M scores corresponding to the M bounding
+            boxes. Please note, M is equal to the 2nd dimension
+ of BBoxes. The data type is float32 or float64.
+ score_threshold (float): Threshold to filter out bounding boxes with
+ low confidence score.
+ post_threshold (float): Threshold to filter out bounding boxes with
+ low confidence score AFTER decaying.
+ nms_top_k (int): Maximum number of detections to be kept according to
+ the confidences after the filtering detections based
+ on score_threshold.
+ keep_top_k (int): Number of total bboxes to be kept per image after NMS
+ step. -1 means keeping all bboxes after NMS step.
+ use_gaussian (bool): Use Gaussian as the decay function. Default: False
+ gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
+ background_label (int): The index of background label, the background
+ label will be ignored. If set to -1, then all
+ categories will be considered. Default: 0
+ normalized (bool): Whether detections are normalized. Default: True
+ return_index(bool): Whether return selected index. Default: False
+ return_rois_num(bool): whether return rois_num. Default: True
+ name(str): Name of the matrix nms op. Default: None.
+ Returns:
+        A tuple (Out, RoisNum, Index): Index is None if return_index is
+        False, and RoisNum is None if return_rois_num is False.
+ Out (Tensor): A 2-D Tensor with shape [No, 6] containing the
+ detection results.
+ Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
+ (After version 1.3, when no boxes detected, the lod is changed
+ from {0} to {1})
+ Index (Tensor): A 2-D Tensor with shape [No, 1] containing the
+            selected indices, which are absolute values across batches.
+ rois_num (Tensor): A 1-D Tensor with shape [N] containing
+ the number of detected boxes in each image.
+ Examples:
+ .. code-block:: python
+ import paddle
+            from ppdet.modeling import ops
+            paddle.enable_static()
+ boxes = paddle.static.data(name='bboxes', shape=[None,81, 4],
+ dtype='float32', lod_level=1)
+ scores = paddle.static.data(name='scores', shape=[None,81],
+ dtype='float32', lod_level=1)
+ out = ops.matrix_nms(bboxes=boxes, scores=scores, background_label=0,
+ score_threshold=0.5, post_threshold=0.1,
+ nms_top_k=400, keep_top_k=200, normalized=False)
+ """
+ check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
+ 'matrix_nms')
+ check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
+ 'matrix_nms')
+ check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
+ check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
+    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
+ check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
+ check_type(normalized, 'normalized', bool, 'matrix_nms')
+ check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
+ check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
+ check_type(background_label, 'background_label', int, 'matrix_nms')
+
+ if in_dygraph_mode():
+ attrs = ('background_label', background_label, 'score_threshold',
+ score_threshold, 'post_threshold', post_threshold, 'nms_top_k',
+ nms_top_k, 'gaussian_sigma', gaussian_sigma, 'use_gaussian',
+ use_gaussian, 'keep_top_k', keep_top_k, 'normalized',
+ normalized)
+ out, index, rois_num = core.ops.matrix_nms(bboxes, scores, *attrs)
+ if not return_index:
+ index = None
+ if not return_rois_num:
+ rois_num = None
+ return out, rois_num, index
+ else:
+ helper = LayerHelper('matrix_nms', **locals())
+ output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
+ index = helper.create_variable_for_type_inference(dtype='int32')
+ outputs = {'Out': output, 'Index': index}
+ if return_rois_num:
+ rois_num = helper.create_variable_for_type_inference(dtype='int32')
+ outputs['RoisNum'] = rois_num
+
+ helper.append_op(
+ type="matrix_nms",
+ inputs={'BBoxes': bboxes,
+ 'Scores': scores},
+ attrs={
+ 'background_label': background_label,
+ 'score_threshold': score_threshold,
+ 'post_threshold': post_threshold,
+ 'nms_top_k': nms_top_k,
+ 'gaussian_sigma': gaussian_sigma,
+ 'use_gaussian': use_gaussian,
+ 'keep_top_k': keep_top_k,
+ 'normalized': normalized
+ },
+ outputs=outputs)
+ output.stop_gradient = True
+
+ if not return_index:
+ index = None
+ if not return_rois_num:
+ rois_num = None
+ return output, rois_num, index
+
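+# Dygraph call sketch (tensors assumed as in the docstring; returns
+# (out, rois_num, index), with entries None unless requested):
+#
+#   out, rois_num, index = matrix_nms(
+#       bboxes, scores, score_threshold=0.5, post_threshold=0.1,
+#       nms_top_k=400, keep_top_k=200, return_index=True)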
+
+def bipartite_match(dist_matrix,
+ match_type=None,
+ dist_threshold=None,
+ name=None):
+ """
+
+    This operator implements a greedy bipartite matching algorithm, which is
+    used to obtain the matching with the maximum distance based on the input
+    distance matrix. For an input 2-D matrix, the bipartite matching algorithm
+    can find the matched column for each row (matched means the largest
+    distance), and can also find the matched row for each column. This operator
+    only calculates matched indices from column to row. For each instance,
+    the number of matched indices is the column number of the input distance
+    matrix. **The OP only supports CPU**.
+
+    There are two outputs, matched indices and distances.
+    In short, this algorithm matches the best (maximum-distance)
+    row entity to each column entity, and the matched indices are not duplicated
+    in each row of ColToRowMatchIndices. If a column entity is not matched to
+    any row entity, -1 is set in ColToRowMatchIndices.
+
+ NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
+ If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
+ If Tensor, the height of ColToRowMatchIndices is 1.
+
+ NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
+ layer. Please consider to use :code:`ssd_loss` instead.
+
+ Args:
+ dist_matrix(Tensor): This input is a 2-D LoDTensor with shape
+ [K, M]. The data type is float32 or float64. It is pair-wise
+ distance matrix between the entities represented by each row and
+ each column. For example, assumed one entity is A with shape [K],
+ another entity is B with shape [M]. The dist_matrix[i][j] is the
+            distance between A[i] and B[j]. The bigger the distance is, the
+            better the pair matches. NOTE: This tensor can contain LoD
+ information to represent a batch of inputs. One instance of this
+ batch can contain different numbers of entities.
+ match_type(str, optional): The type of matching method, should be
+ 'bipartite' or 'per_prediction'. None ('bipartite') by default.
+ dist_threshold(float32, optional): If `match_type` is 'per_prediction',
+ this threshold is to determine the extra matching bboxes based
+ on the maximum distance, 0.5 by default.
+ name(str, optional): For detailed information, please refer
+            to :ref:`api_guide_Name`. Usually there is no need to set it, and it is
+            None by default.
+
+ Returns:
+ Tuple:
+
+ matched_indices(Tensor): A 2-D Tensor with shape [N, M]. The data
+ type is int32. N is the batch size. If match_indices[i][j] is -1, it
+ means B[j] does not match any entity in i-th instance.
+ Otherwise, it means B[j] is matched to row
+ match_indices[i][j] in i-th instance. The row number of
+ i-th instance is saved in match_indices[i][j].
+
+ matched_distance(Tensor): A 2-D Tensor with shape [N, M]. The data
+ type is float32. N is batch size. If match_indices[i][j] is -1,
+ match_distance[i][j] is also -1.0. Otherwise, assumed
+ match_distance[i][j] = d, and the row offsets of each instance
+ are called LoD. Then match_distance[i][j] =
+ dist_matrix[d+LoD[i]][j].
+
+ Examples:
+
+ .. code-block:: python
+ import paddle
+ from ppdet.modeling import ops
+ from ppdet.modeling.utils import iou_similarity
+
+ paddle.enable_static()
+
+ x = paddle.static.data(name='x', shape=[None, 4], dtype='float32')
+ y = paddle.static.data(name='y', shape=[None, 4], dtype='float32')
+ iou = iou_similarity(x=x, y=y)
+ matched_indices, matched_dist = ops.bipartite_match(iou)
+ """
+ check_variable_and_dtype(dist_matrix, 'dist_matrix',
+ ['float32', 'float64'], 'bipartite_match')
+
+ if in_dygraph_mode():
+ match_indices, match_distance = core.ops.bipartite_match(
+ dist_matrix, "match_type", match_type, "dist_threshold",
+ dist_threshold)
+ return match_indices, match_distance
+
+ helper = LayerHelper('bipartite_match', **locals())
+ match_indices = helper.create_variable_for_type_inference(dtype='int32')
+ match_distance = helper.create_variable_for_type_inference(
+ dtype=dist_matrix.dtype)
+ helper.append_op(
+ type='bipartite_match',
+ inputs={'DistMat': dist_matrix},
+ attrs={
+ 'match_type': match_type,
+ 'dist_threshold': dist_threshold,
+ },
+ outputs={
+ 'ColToRowMatchIndices': match_indices,
+ 'ColToRowMatchDist': match_distance
+ })
+ return match_indices, match_distance
+
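+# Worked toy example of the greedy matching (one instance, 2 rows x 2 cols):
+#
+#   dist = [[0.9, 0.1],
+#           [0.2, 0.8]]
+#   # step 1: the largest entry, 0.9, matches column 0 to row 0
+#   # step 2: excluding row 0 and column 0, 0.8 matches column 1 to row 1
+#   # so ColToRowMatchIndices == [[0, 1]], ColToRowMatchDist == [[0.9, 0.8]]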
+
+@paddle.jit.not_to_static
+def box_coder(prior_box,
+ prior_box_var,
+ target_box,
+ code_type="encode_center_size",
+ box_normalized=True,
+ axis=0,
+ name=None):
+ """
+ **Box Coder Layer**
+ Encode/Decode the target bounding box with the priorbox information.
+
+ The Encoding schema described below:
+ .. math::
+ ox = (tx - px) / pw / pxv
+ oy = (ty - py) / ph / pyv
+ ow = \log(\abs(tw / pw)) / pwv
+ oh = \log(\abs(th / ph)) / phv
+ The Decoding schema described below:
+
+ .. math::
+
+        ox = (pw * pxv * tx + px) - ow / 2
+        oy = (ph * pyv * ty + py) - oh / 2
+        ow = \exp(pwv * tw) * pw
+        oh = \exp(phv * th) * ph
+ where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
+ width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
+ the priorbox's (anchor) center coordinates, width and height. `pxv`,
+ `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
+ `ow`, `oh` denote the encoded/decoded coordinates, width and height.
+ During Box Decoding, two modes for broadcast are supported. Say target
+ box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
+ [M, 4]. Then prior box will broadcast to target box along the
+ assigned axis.
+
+ Args:
+ prior_box(Tensor): Box list prior_box is a 2-D Tensor with shape
+ [M, 4] holds M boxes and data type is float32 or float64. Each box
+ is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
+ left top coordinate of the anchor box, if the input is image feature
+ map, they are close to the origin of the coordinate system.
+ [xmax, ymax] is the right bottom coordinate of the anchor box.
+ prior_box_var(List|Tensor|None): prior_box_var supports three types
+            of input. One is a Tensor with shape [M, 4] which holds M groups of
+            variances, with data type float32 or float64. The second is a list
+            of 4 elements shared by all boxes, with data type float32 or float64.
+            The third is None, in which case variances are not involved in the calculation.
+ target_box(Tensor): This input can be a 2-D LoDTensor with shape
+ [N, 4] when code_type is 'encode_center_size'. This input also can
+ be a 3-D Tensor with shape [N, M, 4] when code_type is
+ 'decode_center_size'. Each box is represented as
+ [xmin, ymin, xmax, ymax]. The data type is float32 or float64.
+ code_type(str): The code type used with the target box. It can be
+ `encode_center_size` or `decode_center_size`. `encode_center_size`
+ by default.
+ box_normalized(bool): Whether treat the priorbox as a normalized box.
+ Set true by default.
+ axis(int): Which axis in PriorBox to broadcast for box decode,
+ for example, if axis is 0 and TargetBox has shape [N, M, 4] and
+ PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
+ for decoding. It is only valid when code type is
+ `decode_center_size`. Set 0 by default.
+ name(str, optional): For detailed information, please refer
+            to :ref:`api_guide_Name`. Usually there is no need to set it, and it is
+            None by default.
+
+ Returns:
+ Tensor:
+ output_box(Tensor): When code_type is 'encode_center_size', the
+ output tensor of box_coder_op with shape [N, M, 4] representing the
+ result of N target boxes encoded with M Prior boxes and variances.
+ When code_type is 'decode_center_size', N represents the batch size
+ and M represents the number of decoded boxes.
+
+ Examples:
+
+ .. code-block:: python
+
+ import paddle
+ from ppdet.modeling import ops
+ paddle.enable_static()
+ # For encode
+ prior_box_encode = paddle.static.data(name='prior_box_encode',
+ shape=[512, 4],
+ dtype='float32')
+ target_box_encode = paddle.static.data(name='target_box_encode',
+ shape=[81, 4],
+ dtype='float32')
+ output_encode = ops.box_coder(prior_box=prior_box_encode,
+ prior_box_var=[0.1,0.1,0.2,0.2],
+ target_box=target_box_encode,
+ code_type="encode_center_size")
+ # For decode
+ prior_box_decode = paddle.static.data(name='prior_box_decode',
+ shape=[512, 4],
+ dtype='float32')
+ target_box_decode = paddle.static.data(name='target_box_decode',
+ shape=[512, 81, 4],
+ dtype='float32')
+ output_decode = ops.box_coder(prior_box=prior_box_decode,
+ prior_box_var=[0.1,0.1,0.2,0.2],
+ target_box=target_box_decode,
+ code_type="decode_center_size",
+ box_normalized=False,
+ axis=1)
+ """
+ check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
+ 'box_coder')
+ check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
+ 'box_coder')
+
+ if in_dygraph_mode():
+ if isinstance(prior_box_var, Variable):
+ output_box = core.ops.box_coder(
+ prior_box, prior_box_var, target_box, "code_type", code_type,
+ "box_normalized", box_normalized, "axis", axis)
+
+ elif isinstance(prior_box_var, list):
+ output_box = core.ops.box_coder(
+ prior_box, None, target_box, "code_type", code_type,
+ "box_normalized", box_normalized, "axis", axis, "variance",
+ prior_box_var)
+ else:
+ raise TypeError(
+ "Input variance of box_coder must be Variable or list")
+ return output_box
+ else:
+ helper = LayerHelper("box_coder", **locals())
+
+ output_box = helper.create_variable_for_type_inference(
+ dtype=prior_box.dtype)
+
+ inputs = {"PriorBox": prior_box, "TargetBox": target_box}
+ attrs = {
+ "code_type": code_type,
+ "box_normalized": box_normalized,
+ "axis": axis
+ }
+ if isinstance(prior_box_var, Variable):
+ inputs['PriorBoxVar'] = prior_box_var
+ elif isinstance(prior_box_var, list):
+ attrs['variance'] = prior_box_var
+ else:
+ raise TypeError(
+ "Input variance of box_coder must be Variable or list")
+ helper.append_op(
+ type="box_coder",
+ inputs=inputs,
+ attrs=attrs,
+ outputs={"OutputBox": output_box})
+ return output_box
+
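+# Worked encode example (toy numbers, following the encoding schema above):
+#   prior box [0, 0, 10, 10] -> center (5, 5), pw = ph = 10
+#   target box [2, 2, 6, 6]  -> center (4, 4), tw = th = 4
+#   with variances [0.1, 0.1, 0.2, 0.2]:
+#     ox = (4 - 5) / 10 / 0.1 = -1.0, oy = -1.0
+#     ow = log(4 / 10) / 0.2 ~= -4.58, oh ~= -4.58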
+
+@paddle.jit.not_to_static
+def generate_proposals(scores,
+ bbox_deltas,
+ im_shape,
+ anchors,
+ variances,
+ pre_nms_top_n=6000,
+ post_nms_top_n=1000,
+ nms_thresh=0.5,
+ min_size=0.1,
+ eta=1.0,
+ pixel_offset=False,
+ return_rois_num=False,
+ name=None):
+ """
+    **Generate proposals for Faster-RCNN**
+    This operation proposes RoIs according to each box's probability of
+    being a foreground object, where
+    the boxes are computed from the anchors. bbox_deltas and objectness
+    scores are the outputs of the RPN. The final proposals
+    can be used to train the detection net.
+    To generate proposals, this operation performs the following steps:
+ 1. Transposes and resizes scores and bbox_deltas in size of
+ (H*W*A, 1) and (H*W*A, 4)
+ 2. Calculate box locations as proposals candidates.
+ 3. Clip boxes to image
+ 4. Remove predicted boxes with small area.
+ 5. Apply NMS to get final proposals as output.
+ Args:
+ scores(Tensor): A 4-D Tensor with shape [N, A, H, W] represents
+ the probability for each box to be an object.
+ N is batch size, A is number of anchors, H and W are height and
+ width of the feature map. The data type must be float32.
+ bbox_deltas(Tensor): A 4-D Tensor with shape [N, 4*A, H, W]
+ represents the difference between predicted box location and
+ anchor location. The data type must be float32.
+ im_shape(Tensor): A 2-D Tensor with shape [N, 2] represents H, W, the
+ origin image size or input size. The data type can be float32 or
+ float64.
+        anchors(Tensor): A 4-D Tensor represents the anchors with a layout
+            of [H, W, A, 4]. H and W are the height and width of the feature
+            map, and A is the number of anchors at each position. Each anchor
+            is in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
+        variances(Tensor): A 4-D Tensor. The expanded variances of anchors with a layout of
+            [H, W, num_priors, 4]. Each variance is in
+            (xcenter, ycenter, w, h) format. The data type must be float32.
+        pre_nms_top_n(int): Number of total bboxes to be kept per
+            image before NMS. `6000` by default.
+        post_nms_top_n(int): Number of total bboxes to be kept per
+            image after NMS. `1000` by default.
+        nms_thresh(float): Threshold in NMS. `0.5` by default.
+        min_size(float): Remove predicted boxes with either height or
+            width < min_size. `0.1` by default.
+        eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
+            `adaptive_threshold = adaptive_threshold * eta` in each iteration.
+        pixel_offset(bool): Whether there is a pixel offset of 1 when
+            computing box width and height. `False` by default.
+        return_rois_num(bool): Whether to return a 1-D Tensor with shape [N]
+            holding the number of RoIs of each image in the batch, where N is
+            the number of images. For example, the values [4, 5] mean the
+            first image has 4 RoIs and the second has 5. It is only used in
+            RCNN models. `False` by default.
+        name(str, optional): For detailed information, please refer
+            to :ref:`api_guide_Name`. Usually name does not need to be set.
+            None by default.
+
+    Returns:
+        tuple:
+            A tuple with format ``(rpn_rois, rpn_roi_probs, rpn_rois_num)``;
+            ``rpn_rois_num`` is None unless ``return_rois_num`` is True.
+            - **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` where ``N`` is the number of RoIs. The data type is the same as ``scores``.
+            - **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` where ``N`` is the number of RoIs. The data type is the same as ``scores``.
+            - **rpn_rois_num**: 1-D Tensor with the RoI count per image; it is only meaningful when ``return_rois_num`` is True.
+
+ Examples:
+ .. code-block:: python
+
+ import paddle
+ from ppdet.modeling import ops
+ paddle.enable_static()
+ scores = paddle.static.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
+ bbox_deltas = paddle.static.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
+ im_shape = paddle.static.data(name='im_shape', shape=[None, 2], dtype='float32')
+ anchors = paddle.static.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
+ variances = paddle.static.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
+            rois, roi_probs, rois_num = ops.generate_proposals(scores, bbox_deltas,
+                         im_shape, anchors, variances)
+ """
+ if in_dygraph_mode():
+ assert return_rois_num, "return_rois_num should be True in dygraph mode."
+ attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n,
+ 'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta,
+ 'pixel_offset', pixel_offset)
+ rpn_rois, rpn_roi_probs, rpn_rois_num = core.ops.generate_proposals_v2(
+ scores, bbox_deltas, im_shape, anchors, variances, *attrs)
+ return rpn_rois, rpn_roi_probs, rpn_rois_num
+
+ else:
+ helper = LayerHelper('generate_proposals_v2', **locals())
+
+ check_variable_and_dtype(scores, 'scores', ['float32'],
+ 'generate_proposals_v2')
+ check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
+ 'generate_proposals_v2')
+ check_variable_and_dtype(im_shape, 'im_shape', ['float32', 'float64'],
+ 'generate_proposals_v2')
+ check_variable_and_dtype(anchors, 'anchors', ['float32'],
+ 'generate_proposals_v2')
+ check_variable_and_dtype(variances, 'variances', ['float32'],
+ 'generate_proposals_v2')
+
+ rpn_rois = helper.create_variable_for_type_inference(
+ dtype=bbox_deltas.dtype)
+ rpn_roi_probs = helper.create_variable_for_type_inference(
+ dtype=scores.dtype)
+ outputs = {
+ 'RpnRois': rpn_rois,
+ 'RpnRoiProbs': rpn_roi_probs,
+ }
+        if return_rois_num:
+            rpn_rois_num = helper.create_variable_for_type_inference(
+                dtype='int32')
+            rpn_rois_num.stop_gradient = True
+            outputs['RpnRoisNum'] = rpn_rois_num
+        else:
+            # Keep the return signature consistent when RoI counts are
+            # not requested, instead of returning an undefined name.
+            rpn_rois_num = None
+
+ helper.append_op(
+ type="generate_proposals_v2",
+ inputs={
+ 'Scores': scores,
+ 'BboxDeltas': bbox_deltas,
+ 'ImShape': im_shape,
+ 'Anchors': anchors,
+ 'Variances': variances
+ },
+ attrs={
+ 'pre_nms_topN': pre_nms_top_n,
+ 'post_nms_topN': post_nms_top_n,
+ 'nms_thresh': nms_thresh,
+ 'min_size': min_size,
+ 'eta': eta,
+ 'pixel_offset': pixel_offset
+ },
+ outputs=outputs)
+ rpn_rois.stop_gradient = True
+ rpn_roi_probs.stop_gradient = True
+
+ return rpn_rois, rpn_roi_probs, rpn_rois_num
+
+
+def sigmoid_cross_entropy_with_logits(input,
+                                      label,
+                                      ignore_index=-100,
+                                      normalize=False):
+    # Per-element BCE-with-logits loss. Entries whose label equals
+    # ignore_index are masked out; if normalize is True, the loss is
+    # divided by the number of valid (non-ignored) elements.
+    output = F.binary_cross_entropy_with_logits(input, label, reduction='none')
+ mask_tensor = paddle.cast(label != ignore_index, 'float32')
+ output = paddle.multiply(output, mask_tensor)
+ if normalize:
+ sum_valid_mask = paddle.sum(mask_tensor)
+ output = output / sum_valid_mask
+ return output
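+
+# Example (illustrative sketch, not part of the original file): labels equal
+# to `ignore_index` contribute zero loss.
+#
+#   logits = paddle.to_tensor([[0.3], [1.2], [-0.5]])
+#   labels = paddle.to_tensor([[1.0], [-100.0], [0.0]])
+#   loss = sigmoid_cross_entropy_with_logits(logits, labels)
+#   # loss has shape [3, 1]; loss[1] == 0 because its label hits ignore_index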
+
+
+def smooth_l1(input, label, inside_weight=None, outside_weight=None,
+              sigma=None):
+    # Weighted smooth-L1 loss with delta = 1 / sigma^2; per-coordinate losses
+    # are scaled by inside/outside weights and summed per sample, following
+    # the original Faster R-CNN formulation.
+    input_new = paddle.multiply(input, inside_weight)
+ label_new = paddle.multiply(label, inside_weight)
+ delta = 1 / (sigma * sigma)
+ out = F.smooth_l1_loss(input_new, label_new, reduction='none', delta=delta)
+ out = paddle.multiply(out, outside_weight)
+ out = out / delta
+ out = paddle.reshape(out, shape=[out.shape[0], -1])
+ out = paddle.sum(out, axis=1)
+ return out
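+
+# Example (illustrative sketch): with sigma = 3.0 the smooth-L1 transition
+# point is delta = 1 / 9, matching the common Faster R-CNN RPN setting.
+#
+#   pred = paddle.rand([2, 4])
+#   gt = paddle.rand([2, 4])
+#   ones = paddle.ones_like(pred)
+#   loss = smooth_l1(pred, gt, inside_weight=ones, outside_weight=ones,
+#                    sigma=3.0)  # shape [2], one summed loss per sample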
+
+
+def channel_shuffle(x, groups):
+    # Channel shuffle as in ShuffleNet: reshape to [N, groups, C/groups, H, W],
+    # swap the group and channel axes, then flatten back to [N, C, H, W].
+    batch_size, num_channels, height, width = x.shape[0:4]
+ assert num_channels % groups == 0, 'num_channels should be divisible by groups'
+ channels_per_group = num_channels // groups
+ x = paddle.reshape(
+ x=x, shape=[batch_size, groups, channels_per_group, height, width])
+ x = paddle.transpose(x=x, perm=[0, 2, 1, 3, 4])
+ x = paddle.reshape(x=x, shape=[batch_size, num_channels, height, width])
+ return x
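+
+# Example (illustrative sketch): shuffling 4 channels with groups=2 reorders
+# channel indices [0, 1, 2, 3] -> [0, 2, 1, 3], exchanging information
+# between the two groups.
+#
+#   x = paddle.arange(4, dtype='float32').reshape([1, 4, 1, 1])
+#   y = channel_shuffle(x, groups=2)
+#   # y.flatten() -> [0., 2., 1., 3.]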
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/post_process.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/post_process.py
new file mode 100644
index 000000000..679e09134
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/post_process.py
@@ -0,0 +1,656 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from ppdet.core.workspace import register
+from ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly
+from ppdet.modeling.layers import TTFBox
+from .transformers import bbox_cxcywh_to_xyxy
+try:
+ from collections.abc import Sequence
+except Exception:
+ from collections import Sequence
+
+__all__ = [
+ 'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',
+ 'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',
+ 'DETRBBoxPostProcess', 'SparsePostProcess'
+]
+
+
+@register
+class BBoxPostProcess(nn.Layer):
+ __shared__ = ['num_classes']
+ __inject__ = ['decode', 'nms']
+
+ def __init__(self, num_classes=80, decode=None, nms=None):
+ super(BBoxPostProcess, self).__init__()
+ self.num_classes = num_classes
+ self.decode = decode
+ self.nms = nms
+ self.fake_bboxes = paddle.to_tensor(
+ np.array(
+ [[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
+ self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
+
+ def forward(self, head_out, rois, im_shape, scale_factor):
+ """
+ Decode the bbox and do NMS if needed.
+
+ Args:
+ head_out (tuple): bbox_pred and cls_prob of bbox_head output.
+ rois (tuple): roi and rois_num of rpn_head output.
+ im_shape (Tensor): The shape of the input image.
+ scale_factor (Tensor): The scale factor of the input image.
+ Returns:
+ bbox_pred (Tensor): The output prediction with shape [N, 6], including
+ labels, scores and bboxes. The size of bboxes are corresponding
+ to the input image, the bboxes may be used in other branch.
+ bbox_num (Tensor): The number of prediction boxes of each batch with
+ shape [1], and is N.
+ """
+ if self.nms is not None:
+ bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)
+ bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)
+ else:
+ bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,
+ scale_factor)
+ return bbox_pred, bbox_num
+
+ def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
+ """
+ Rescale, clip and filter the bbox from the output of NMS to
+ get final prediction.
+
+ Notes:
+ Currently only support bs = 1.
+
+ Args:
+ bboxes (Tensor): The output bboxes with shape [N, 6] after decode
+ and NMS, including labels, scores and bboxes.
+ bbox_num (Tensor): The number of prediction boxes of each batch with
+ shape [1], and is N.
+ im_shape (Tensor): The shape of the input image.
+ scale_factor (Tensor): The scale factor of the input image.
+ Returns:
+ pred_result (Tensor): The final prediction results with shape [N, 6]
+ including labels, scores and bboxes.
+ """
+
+ if bboxes.shape[0] == 0:
+ bboxes = self.fake_bboxes
+ bbox_num = self.fake_bbox_num
+
+ origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
+
+ origin_shape_list = []
+ scale_factor_list = []
+ # scale_factor: scale_y, scale_x
+ for i in range(bbox_num.shape[0]):
+ expand_shape = paddle.expand(origin_shape[i:i + 1, :],
+ [bbox_num[i], 2])
+ scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
+ scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])
+ expand_scale = paddle.expand(scale, [bbox_num[i], 4])
+ origin_shape_list.append(expand_shape)
+ scale_factor_list.append(expand_scale)
+
+ self.origin_shape_list = paddle.concat(origin_shape_list)
+ scale_factor_list = paddle.concat(scale_factor_list)
+
+ # bboxes: [N, 6], label, score, bbox
+ pred_label = bboxes[:, 0:1]
+ pred_score = bboxes[:, 1:2]
+ pred_bbox = bboxes[:, 2:]
+ # rescale bbox to original image
+ scaled_bbox = pred_bbox / scale_factor_list
+ origin_h = self.origin_shape_list[:, 0]
+ origin_w = self.origin_shape_list[:, 1]
+ zeros = paddle.zeros_like(origin_h)
+ # clip bbox to [0, original_size]
+ x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)
+ y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)
+ x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)
+ y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)
+ pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
+ # filter empty bbox
+ keep_mask = nonempty_bbox(pred_bbox, return_mask=True)
+ keep_mask = paddle.unsqueeze(keep_mask, [1])
+ pred_label = paddle.where(keep_mask, pred_label,
+ paddle.ones_like(pred_label) * -1)
+ pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
+ return pred_result
+
+ def get_origin_shape(self, ):
+ return self.origin_shape_list
+
+
+@register
+class MaskPostProcess(object):
+ """
+    refer to:
+    https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/mask_ops.py
+
+ Get Mask output according to the output from model
+ """
+
+ def __init__(self, binary_thresh=0.5):
+ super(MaskPostProcess, self).__init__()
+ self.binary_thresh = binary_thresh
+
+ def paste_mask(self, masks, boxes, im_h, im_w):
+ """
+ Paste the mask prediction to the original image.
+ """
+ x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)
+ masks = paddle.unsqueeze(masks, [0, 1])
+        # Pixel-center coordinates over the full output image.
+        img_y = paddle.arange(0, im_h, dtype='float32') + 0.5
+        img_x = paddle.arange(0, im_w, dtype='float32') + 0.5
+        # Normalize to [-1, 1] relative to each box, as grid_sample expects.
+        img_y = (img_y - y0) / (y1 - y0) * 2 - 1
+        img_x = (img_x - x0) / (x1 - x0) * 2 - 1
+ img_x = paddle.unsqueeze(img_x, [1])
+ img_y = paddle.unsqueeze(img_y, [2])
+ N = boxes.shape[0]
+
+ gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])
+ gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])
+ grid = paddle.stack([gx, gy], axis=3)
+ img_masks = F.grid_sample(masks, grid, align_corners=False)
+ return img_masks[:, 0]
+
+ def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
+ """
+ Decode the mask_out and paste the mask to the origin image.
+
+ Args:
+ mask_out (Tensor): mask_head output with shape [N, 28, 28].
+ bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode
+ and NMS, including labels, scores and bboxes.
+ bbox_num (Tensor): The number of prediction boxes of each batch with
+ shape [1], and is N.
+ origin_shape (Tensor): The origin shape of the input image, the tensor
+ shape is [N, 2], and each row is [h, w].
+ Returns:
+ pred_result (Tensor): The final prediction mask results with shape
+ [N, h, w] in binary mask style.
+ """
+ num_mask = mask_out.shape[0]
+ origin_shape = paddle.cast(origin_shape, 'int32')
+ # TODO: support bs > 1 and mask output dtype is bool
+ pred_result = paddle.zeros(
+ [num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
+ if bbox_num == 1 and bboxes[0][0] == -1:
+ return pred_result
+
+ # TODO: optimize chunk paste
+ pred_result = []
+ for i in range(bboxes.shape[0]):
+ im_h, im_w = origin_shape[i][0], origin_shape[i][1]
+ pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,
+ im_w)
+ pred_mask = pred_mask >= self.binary_thresh
+ pred_mask = paddle.cast(pred_mask, 'int32')
+ pred_result.append(pred_mask)
+ pred_result = paddle.concat(pred_result)
+ return pred_result
+
+
+@register
+class FCOSPostProcess(object):
+ __inject__ = ['decode', 'nms']
+
+ def __init__(self, decode=None, nms=None):
+ super(FCOSPostProcess, self).__init__()
+ self.decode = decode
+ self.nms = nms
+
+ def __call__(self, fcos_head_outs, scale_factor):
+ """
+ Decode the bbox and do NMS in FCOS.
+ """
+ locations, cls_logits, bboxes_reg, centerness = fcos_head_outs
+ bboxes, score = self.decode(locations, cls_logits, bboxes_reg,
+ centerness, scale_factor)
+ bbox_pred, bbox_num, _ = self.nms(bboxes, score)
+ return bbox_pred, bbox_num
+
+
+@register
+class S2ANetBBoxPostProcess(nn.Layer):
+ __shared__ = ['num_classes']
+ __inject__ = ['nms']
+
+ def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):
+ super(S2ANetBBoxPostProcess, self).__init__()
+ self.num_classes = num_classes
+ self.nms_pre = paddle.to_tensor(nms_pre)
+ self.min_bbox_size = min_bbox_size
+ self.nms = nms
+ self.origin_shape_list = []
+ self.fake_pred_cls_score_bbox = paddle.to_tensor(
+ np.array(
+ [[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
+ dtype='float32'))
+ self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
+
+ def forward(self, pred_scores, pred_bboxes):
+ """
+ pred_scores : [N, M] score
+ pred_bboxes : [N, 5] xc, yc, w, h, a
+ im_shape : [N, 2] im_shape
+ scale_factor : [N, 2] scale_factor
+ """
+ pred_ploys0 = rbox2poly(pred_bboxes)
+ pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)
+
+ # pred_scores [NA, 16] --> [16, NA]
+ pred_scores0 = paddle.transpose(pred_scores, [1, 0])
+ pred_scores = paddle.unsqueeze(pred_scores0, axis=0)
+
+ pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,
+ self.num_classes)
+ # Prevent empty bbox_pred from decode or NMS.
+ # Bboxes and score before NMS may be empty due to the score threshold.
+ if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[
+ 1] <= 1:
+ pred_cls_score_bbox = self.fake_pred_cls_score_bbox
+ bbox_num = self.fake_bbox_num
+
+ pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])
+ return pred_cls_score_bbox, bbox_num
+
+ def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
+ """
+ Rescale, clip and filter the bbox from the output of NMS to
+ get final prediction.
+ Args:
+ bboxes(Tensor): bboxes [N, 10]
+ bbox_num(Tensor): bbox_num
+ im_shape(Tensor): [1 2]
+ scale_factor(Tensor): [1 2]
+ Returns:
+ bbox_pred(Tensor): The output is the prediction with shape [N, 8]
+ including labels, scores and bboxes. The size of
+ bboxes are corresponding to the original image.
+ """
+ origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
+
+ origin_shape_list = []
+ scale_factor_list = []
+ # scale_factor: scale_y, scale_x
+ for i in range(bbox_num.shape[0]):
+ expand_shape = paddle.expand(origin_shape[i:i + 1, :],
+ [bbox_num[i], 2])
+ scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
+ scale = paddle.concat([
+ scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
+ scale_y
+ ])
+ expand_scale = paddle.expand(scale, [bbox_num[i], 8])
+ origin_shape_list.append(expand_shape)
+ scale_factor_list.append(expand_scale)
+
+ origin_shape_list = paddle.concat(origin_shape_list)
+ scale_factor_list = paddle.concat(scale_factor_list)
+
+ # bboxes: [N, 10], label, score, bbox
+ pred_label_score = bboxes[:, 0:2]
+ pred_bbox = bboxes[:, 2:]
+
+ # rescale bbox to original image
+ pred_bbox = pred_bbox.reshape([-1, 8])
+ scaled_bbox = pred_bbox / scale_factor_list
+ origin_h = origin_shape_list[:, 0]
+ origin_w = origin_shape_list[:, 1]
+
+ bboxes = scaled_bbox
+ zeros = paddle.zeros_like(origin_h)
+ x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
+ y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
+ x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
+ y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
+ x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
+ y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
+ x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
+ y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
+ pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
+ pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
+ return pred_result
+
+
+@register
+class JDEBBoxPostProcess(nn.Layer):
+ __shared__ = ['num_classes']
+ __inject__ = ['decode', 'nms']
+
+ def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):
+ super(JDEBBoxPostProcess, self).__init__()
+ self.num_classes = num_classes
+ self.decode = decode
+ self.nms = nms
+ self.return_idx = return_idx
+
+ self.fake_bbox_pred = paddle.to_tensor(
+ np.array(
+ [[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
+ self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
+ self.fake_nms_keep_idx = paddle.to_tensor(
+ np.array(
+ [[0]], dtype='int32'))
+
+ self.fake_yolo_boxes_out = paddle.to_tensor(
+ np.array(
+ [[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))
+ self.fake_yolo_scores_out = paddle.to_tensor(
+ np.array(
+ [[[0.0]]], dtype='float32'))
+ self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))
+
+ def forward(self, head_out, anchors):
+ """
+ Decode the bbox and do NMS for JDE model.
+
+ Args:
+ head_out (list): Bbox_pred and cls_prob of bbox_head output.
+ anchors (list): Anchors of JDE model.
+
+ Returns:
+            boxes_idx (Tensor): The index of the bboxes kept after the 'JDEBox' decode.
+            bbox_pred (Tensor): The output prediction with shape [N, 6],
+                                including labels, scores and bboxes.
+            bbox_num (Tensor): The number of predictions of each batch with shape [N].
+            nms_keep_idx (Tensor): The index of the bboxes kept after NMS.
+ """
+ boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)
+
+ if len(boxes_idx) == 0:
+ boxes_idx = self.fake_boxes_idx
+ yolo_boxes_out = self.fake_yolo_boxes_out
+ yolo_scores_out = self.fake_yolo_scores_out
+ else:
+ yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)
+ # TODO: only support bs=1 now
+ yolo_boxes_out = paddle.reshape(
+ yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])
+ yolo_scores_out = paddle.reshape(
+ yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])
+ boxes_idx = boxes_idx[:, 1:]
+
+ if self.return_idx:
+ bbox_pred, bbox_num, nms_keep_idx = self.nms(
+ yolo_boxes_out, yolo_scores_out, self.num_classes)
+ if bbox_pred.shape[0] == 0:
+ bbox_pred = self.fake_bbox_pred
+ bbox_num = self.fake_bbox_num
+ nms_keep_idx = self.fake_nms_keep_idx
+ return boxes_idx, bbox_pred, bbox_num, nms_keep_idx
+ else:
+ bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,
+ self.num_classes)
+ if bbox_pred.shape[0] == 0:
+ bbox_pred = self.fake_bbox_pred
+ bbox_num = self.fake_bbox_num
+ return _, bbox_pred, bbox_num, _
+
+
+@register
+class CenterNetPostProcess(TTFBox):
+ """
+    Postprocess the model outputs to get the final prediction:
+        1. Do NMS on the heatmap to get the top `max_per_img` bboxes.
+        2. Decode bboxes using the center offset and box size.
+        3. Rescale the decoded bboxes with reference to the original image shape.
+
+    Args:
+        max_per_img(int): the maximum number of predicted objects in an image,
+            500 by default.
+        down_ratio(int): the down ratio from images to heatmap, 4 by default.
+        regress_ltrb (bool): whether to regress left/top/right/bottom or
+            width/height for a box, True by default.
+        for_mot (bool): whether to return extra features used by the tracking model.
+ """
+
+ __shared__ = ['down_ratio', 'for_mot']
+
+ def __init__(self,
+ max_per_img=500,
+ down_ratio=4,
+ regress_ltrb=True,
+ for_mot=False):
+ super(TTFBox, self).__init__()
+ self.max_per_img = max_per_img
+ self.down_ratio = down_ratio
+ self.regress_ltrb = regress_ltrb
+ self.for_mot = for_mot
+
+ def __call__(self, hm, wh, reg, im_shape, scale_factor):
+ heat = self._simple_nms(hm)
+ scores, inds, topk_clses, ys, xs = self._topk(heat)
+ scores = scores.unsqueeze(1)
+ clses = topk_clses.unsqueeze(1)
+
+ reg_t = paddle.transpose(reg, [0, 2, 3, 1])
+ # Like TTFBox, batch size is 1.
+ # TODO: support batch size > 1
+ reg = paddle.reshape(reg_t, [-1, reg_t.shape[-1]])
+ reg = paddle.gather(reg, inds)
+ xs = paddle.cast(xs, 'float32')
+ ys = paddle.cast(ys, 'float32')
+ xs = xs + reg[:, 0:1]
+ ys = ys + reg[:, 1:2]
+
+ wh_t = paddle.transpose(wh, [0, 2, 3, 1])
+ wh = paddle.reshape(wh_t, [-1, wh_t.shape[-1]])
+ wh = paddle.gather(wh, inds)
+
+ if self.regress_ltrb:
+ x1 = xs - wh[:, 0:1]
+ y1 = ys - wh[:, 1:2]
+ x2 = xs + wh[:, 2:3]
+ y2 = ys + wh[:, 3:4]
+ else:
+ x1 = xs - wh[:, 0:1] / 2
+ y1 = ys - wh[:, 1:2] / 2
+ x2 = xs + wh[:, 0:1] / 2
+ y2 = ys + wh[:, 1:2] / 2
+
+        n, c, feat_h, feat_w = hm.shape[:]
+        # The network input may be padded symmetrically to a multiple of the
+        # stride; shift boxes back by half the padding to undo it.
+        padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
+        padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
+ x1 = x1 * self.down_ratio
+ y1 = y1 * self.down_ratio
+ x2 = x2 * self.down_ratio
+ y2 = y2 * self.down_ratio
+
+ x1 = x1 - padw
+ y1 = y1 - padh
+ x2 = x2 - padw
+ y2 = y2 - padh
+
+ bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
+ scale_y = scale_factor[:, 0:1]
+ scale_x = scale_factor[:, 1:2]
+ scale_expand = paddle.concat(
+ [scale_x, scale_y, scale_x, scale_y], axis=1)
+ boxes_shape = bboxes.shape[:]
+ scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
+ bboxes = paddle.divide(bboxes, scale_expand)
+ if self.for_mot:
+ results = paddle.concat([bboxes, scores, clses], axis=1)
+ return results, inds, topk_clses
+ else:
+ results = paddle.concat([clses, scores, bboxes], axis=1)
+ return results, paddle.shape(results)[0:1], topk_clses
+
+
+@register
+class DETRBBoxPostProcess(object):
+ __shared__ = ['num_classes', 'use_focal_loss']
+ __inject__ = []
+
+ def __init__(self,
+ num_classes=80,
+ num_top_queries=100,
+ use_focal_loss=False):
+ super(DETRBBoxPostProcess, self).__init__()
+ self.num_classes = num_classes
+ self.num_top_queries = num_top_queries
+ self.use_focal_loss = use_focal_loss
+
+ def __call__(self, head_out, im_shape, scale_factor):
+ """
+ Decode the bbox.
+
+ Args:
+ head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
+ im_shape (Tensor): The shape of the input image.
+ scale_factor (Tensor): The scale factor of the input image.
+ Returns:
+ bbox_pred (Tensor): The output prediction with shape [N, 6], including
+ labels, scores and bboxes. The size of bboxes are corresponding
+ to the input image, the bboxes may be used in other branch.
+ bbox_num (Tensor): The number of prediction boxes of each batch with
+ shape [bs], and is N.
+ """
+ bboxes, logits, masks = head_out
+
+ bbox_pred = bbox_cxcywh_to_xyxy(bboxes)
+ origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
+ img_h, img_w = origin_shape.unbind(1)
+ origin_shape = paddle.stack(
+ [img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)
+ bbox_pred *= origin_shape
+
+ scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(
+ logits)[:, :, :-1]
+
+ if not self.use_focal_loss:
+ scores, labels = scores.max(-1), scores.argmax(-1)
+ if scores.shape[1] > self.num_top_queries:
+ scores, index = paddle.topk(
+ scores, self.num_top_queries, axis=-1)
+ labels = paddle.stack(
+ [paddle.gather(l, i) for l, i in zip(labels, index)])
+ bbox_pred = paddle.stack(
+ [paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
+ else:
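+            # Note: with focal loss, top-k runs over the flattened
+            # (query, class) scores, so the flat index encodes both; the
+            # modulo / floor-divide below recover the class label and the
+            # query id used to gather the boxes.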
+ scores, index = paddle.topk(
+ scores.reshape([logits.shape[0], -1]),
+ self.num_top_queries,
+ axis=-1)
+ labels = index % logits.shape[2]
+ index = index // logits.shape[2]
+ bbox_pred = paddle.stack(
+ [paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
+
+ bbox_pred = paddle.concat(
+ [
+ labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),
+ bbox_pred
+ ],
+ axis=-1)
+ bbox_num = paddle.to_tensor(
+ bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])
+ bbox_pred = bbox_pred.reshape([-1, 6])
+ return bbox_pred, bbox_num
+
+
+@register
+class SparsePostProcess(object):
+ __shared__ = ['num_classes']
+
+ def __init__(self, num_proposals, num_classes=80):
+ super(SparsePostProcess, self).__init__()
+ self.num_classes = num_classes
+ self.num_proposals = num_proposals
+
+ def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):
+ """
+        Arguments:
+            box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
+                The tensor predicts the classification probability for each proposal.
+            box_pred (Tensor): tensor of shape (batch_size, num_proposals, 4).
+                The tensor predicts 4-vector (x, y, w, h) box
+                regression values for every proposal.
+            scale_factor_wh (Tensor): tensor of shape [batch_size, 2], the scale
+                factor (w, h) of each image.
+            img_whwh (Tensor): tensor of shape [batch_size, 4].
+        Returns:
+            bbox_pred (Tensor): tensor of shape [num_boxes, 6]. Each row has 6 values:
+                [label, confidence, xmin, ymin, xmax, ymax]
+            bbox_num (Tensor): tensor of shape [batch_size], the number of RoIs in each image.
+ """
+ assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)
+
+ img_wh = img_whwh[:, :2]
+
+ scores = F.sigmoid(box_cls)
+ labels = paddle.arange(0, self.num_classes). \
+ unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)
+
+ classes_all = []
+ scores_all = []
+ boxes_all = []
+ for i, (scores_per_image,
+ box_pred_per_image) in enumerate(zip(scores, box_pred)):
+
+ scores_per_image, topk_indices = scores_per_image.flatten(
+ 0, 1).topk(
+ self.num_proposals, sorted=False)
+ labels_per_image = paddle.gather(labels, topk_indices, axis=0)
+
+ box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(
+ [1, self.num_classes, 1]).reshape([-1, 4])
+ box_pred_per_image = paddle.gather(
+ box_pred_per_image, topk_indices, axis=0)
+
+ classes_all.append(labels_per_image)
+ scores_all.append(scores_per_image)
+ boxes_all.append(box_pred_per_image)
+
+ bbox_num = paddle.zeros([len(scale_factor_wh)], dtype="int32")
+ boxes_final = []
+
+ for i in range(len(scale_factor_wh)):
+ classes = classes_all[i]
+ boxes = boxes_all[i]
+ scores = scores_all[i]
+
+ boxes[:, 0::2] = paddle.clip(
+ boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]
+ boxes[:, 1::2] = paddle.clip(
+ boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]
+ boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (
+ boxes[:, 3] - boxes[:, 1]).numpy()
+
+ keep = (boxes_w > 1.) & (boxes_h > 1.)
+
+ if (keep.sum() == 0):
+ bboxes = paddle.zeros([1, 6]).astype("float32")
+ else:
+ boxes = paddle.to_tensor(boxes.numpy()[keep]).astype("float32")
+ classes = paddle.to_tensor(classes.numpy()[keep]).astype(
+ "float32").unsqueeze(-1)
+ scores = paddle.to_tensor(scores.numpy()[keep]).astype(
+ "float32").unsqueeze(-1)
+
+ bboxes = paddle.concat([classes, scores, boxes], axis=-1)
+
+ boxes_final.append(bboxes)
+ bbox_num[i] = bboxes.shape[0]
+
+ bbox_pred = paddle.concat(boxes_final)
+ return bbox_pred, bbox_num
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__init__.py
new file mode 100644
index 000000000..9fb518f2a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__init__.py
@@ -0,0 +1,2 @@
+from . import rpn_head
+from .rpn_head import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..3e5c82d31
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/anchor_generator.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/anchor_generator.cpython-37.pyc
new file mode 100644
index 000000000..db9b88dd8
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/anchor_generator.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/proposal_generator.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/proposal_generator.cpython-37.pyc
new file mode 100644
index 000000000..950055728
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/proposal_generator.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/rpn_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/rpn_head.cpython-37.pyc
new file mode 100644
index 000000000..797bb0a85
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/rpn_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/target.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/target.cpython-37.pyc
new file mode 100644
index 000000000..8d5c58349
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/target.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/target_layer.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/target_layer.cpython-37.pyc
new file mode 100644
index 000000000..79ee00500
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/__pycache__/target_layer.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/anchor_generator.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/anchor_generator.py
new file mode 100644
index 000000000..34f03c0ef
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/anchor_generator.py
@@ -0,0 +1,131 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The code is based on
+# https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/anchor_generator.py
+
+import math
+
+import paddle
+import paddle.nn as nn
+
+from ppdet.core.workspace import register
+
+
+@register
+class AnchorGenerator(nn.Layer):
+ """
+ Generate anchors according to the feature maps
+
+ Args:
+        anchor_sizes (list[float] | list[list[float]]): The anchor sizes at
+            each feature point. list[float] means all feature levels share the
+            same sizes. list[list[float]] means the anchor sizes for
+            each level. Sizes are given in units of the input image size.
+ aspect_ratios (list[float] | list[list[float]]): The aspect ratios at
+ each feature point. list[float] means all feature levels share the
+ same ratios. list[list[float]] means the aspect ratios for
+ each level.
+ strides (list[float]): The strides of feature maps which generate
+ anchors
+ offset (float): The offset of the coordinate of anchors, default 0.
+
+ """
+
+ def __init__(self,
+ anchor_sizes=[32, 64, 128, 256, 512],
+ aspect_ratios=[0.5, 1.0, 2.0],
+ strides=[16.0],
+ variance=[1.0, 1.0, 1.0, 1.0],
+ offset=0.):
+ super(AnchorGenerator, self).__init__()
+ self.anchor_sizes = anchor_sizes
+ self.aspect_ratios = aspect_ratios
+ self.strides = strides
+ self.variance = variance
+ self.cell_anchors = self._calculate_anchors(len(strides))
+ self.offset = offset
+
+ def _broadcast_params(self, params, num_features):
+ if not isinstance(params[0], (list, tuple)): # list[float]
+ return [params] * num_features
+ if len(params) == 1:
+ return list(params) * num_features
+ return params
+
+ def generate_cell_anchors(self, sizes, aspect_ratios):
+ anchors = []
+ for size in sizes:
+ area = size**2.0
+ for aspect_ratio in aspect_ratios:
+ w = math.sqrt(area / aspect_ratio)
+ h = aspect_ratio * w
+ x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0
+ anchors.append([x0, y0, x1, y1])
+ return paddle.to_tensor(anchors, dtype='float32')
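+
+    # Worked example (illustrative): for size=64 and aspect_ratio=0.5
+    # (ratio = h / w), area = 4096, so w = sqrt(4096 / 0.5) ~= 90.51 and
+    # h = 0.5 * w ~= 45.25, giving a cell anchor of roughly
+    # [-45.25, -22.63, 45.25, 22.63] centered at the origin.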
+
+ def _calculate_anchors(self, num_features):
+ sizes = self._broadcast_params(self.anchor_sizes, num_features)
+ aspect_ratios = self._broadcast_params(self.aspect_ratios, num_features)
+ cell_anchors = [
+ self.generate_cell_anchors(s, a)
+ for s, a in zip(sizes, aspect_ratios)
+ ]
+ [
+ self.register_buffer(
+ t.name, t, persistable=False) for t in cell_anchors
+ ]
+ return cell_anchors
+
+ def _create_grid_offsets(self, size, stride, offset):
+ grid_height, grid_width = size[0], size[1]
+ shifts_x = paddle.arange(
+ offset * stride, grid_width * stride, step=stride, dtype='float32')
+ shifts_y = paddle.arange(
+ offset * stride, grid_height * stride, step=stride, dtype='float32')
+ shift_y, shift_x = paddle.meshgrid(shifts_y, shifts_x)
+ shift_x = paddle.reshape(shift_x, [-1])
+ shift_y = paddle.reshape(shift_y, [-1])
+ return shift_x, shift_y
+
+ def _grid_anchors(self, grid_sizes):
+ anchors = []
+ for size, stride, base_anchors in zip(grid_sizes, self.strides,
+ self.cell_anchors):
+ shift_x, shift_y = self._create_grid_offsets(size, stride,
+ self.offset)
+ shifts = paddle.stack((shift_x, shift_y, shift_x, shift_y), axis=1)
+ shifts = paddle.reshape(shifts, [-1, 1, 4])
+ base_anchors = paddle.reshape(base_anchors, [1, -1, 4])
+
+ anchors.append(paddle.reshape(shifts + base_anchors, [-1, 4]))
+
+ return anchors
+
+ def forward(self, input):
+ grid_sizes = [paddle.shape(feature_map)[-2:] for feature_map in input]
+ anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
+ return anchors_over_all_feature_maps
+
+ @property
+ def num_anchors(self):
+ """
+        Returns:
+            int: number of anchors at every pixel location on a feature map.
+                For example, if at every pixel we use anchors of 3 aspect
+                ratios and 5 sizes, the number of anchors is 15.
+                For FPN models, `num_anchors` is the same on every feature map.
+ """
+ return len(self.cell_anchors[0])
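+
+# Minimal usage sketch (illustrative; assumes one FPN level with stride 16):
+#
+#   gen = AnchorGenerator(anchor_sizes=[[32, 64]], aspect_ratios=[[1.0]],
+#                         strides=[16])
+#   feat = paddle.rand([1, 256, 38, 50])
+#   anchors = gen([feat])   # list with one Tensor of shape [38 * 50 * 2, 4]
+#   gen.num_anchors         # 2 anchors per location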
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/proposal_generator.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/proposal_generator.py
new file mode 100644
index 000000000..1fcb8b1e2
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/proposal_generator.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+
+from ppdet.core.workspace import register, serializable
+from .. import ops
+
+
+@register
+@serializable
+class ProposalGenerator(object):
+ """
+ Proposal generation module
+
+    For more details, please refer to the document of generate_proposals
+    in ppdet/modeling/ops.py
+
+ Args:
+ pre_nms_top_n (int): Number of total bboxes to be kept per
+ image before NMS. default 6000
+ post_nms_top_n (int): Number of total bboxes to be kept per
+ image after NMS. default 1000
+ nms_thresh (float): Threshold in NMS. default 0.5
+        min_size (float): Remove predicted boxes with either height or
+            width < min_size. default 0.1
+        eta (float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
+            `adaptive_threshold = adaptive_threshold * eta` in each iteration.
+            default 1.
+        topk_after_collect (bool): whether to adopt topk after batch
+            collection. If topk_after_collect is true, the box filter will not
+            be applied after NMS for each image during proposal generation.
+            default false
+
+ def __init__(self,
+ pre_nms_top_n=12000,
+ post_nms_top_n=2000,
+ nms_thresh=.5,
+ min_size=.1,
+ eta=1.,
+ topk_after_collect=False):
+ super(ProposalGenerator, self).__init__()
+ self.pre_nms_top_n = pre_nms_top_n
+ self.post_nms_top_n = post_nms_top_n
+ self.nms_thresh = nms_thresh
+ self.min_size = min_size
+ self.eta = eta
+ self.topk_after_collect = topk_after_collect
+
+ def __call__(self, scores, bbox_deltas, anchors, im_shape):
+
+ top_n = self.pre_nms_top_n if self.topk_after_collect else self.post_nms_top_n
+ variances = paddle.ones_like(anchors)
+ rpn_rois, rpn_rois_prob, rpn_rois_num = ops.generate_proposals(
+ scores,
+ bbox_deltas,
+ im_shape,
+ anchors,
+ variances,
+ pre_nms_top_n=self.pre_nms_top_n,
+ post_nms_top_n=top_n,
+ nms_thresh=self.nms_thresh,
+ min_size=self.min_size,
+ eta=self.eta,
+ return_rois_num=True)
+ return rpn_rois, rpn_rois_prob, rpn_rois_num, self.post_nms_top_n
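+
+# Minimal usage sketch (illustrative; shapes follow the generate_proposals
+# docstring in ppdet/modeling/ops.py, with A anchors per location):
+#
+#   prop_gen = ProposalGenerator(pre_nms_top_n=6000, post_nms_top_n=1000)
+#   # scores: [N, A, H, W], bbox_deltas: [N, 4*A, H, W],
+#   # anchors: [H, W, A, 4], im_shape: [N, 2]
+#   rois, roi_probs, rois_num, post_nms_top_n = prop_gen(
+#       scores, bbox_deltas, anchors, im_shape)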
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/rpn_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/rpn_head.py
new file mode 100644
index 000000000..1664d7839
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/rpn_head.py
@@ -0,0 +1,259 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import Normal
+
+from ppdet.core.workspace import register
+from .anchor_generator import AnchorGenerator
+from .target_layer import RPNTargetAssign
+from .proposal_generator import ProposalGenerator
+
+
+class RPNFeat(nn.Layer):
+ """
+ Feature extraction in RPN head
+
+ Args:
+ in_channel (int): Input channel
+ out_channel (int): Output channel
+ """
+
+ def __init__(self, in_channel=1024, out_channel=1024):
+ super(RPNFeat, self).__init__()
+ # rpn feat is shared with each level
+ self.rpn_conv = nn.Conv2D(
+ in_channels=in_channel,
+ out_channels=out_channel,
+ kernel_size=3,
+ padding=1,
+ weight_attr=paddle.ParamAttr(initializer=Normal(
+ mean=0., std=0.01)))
+ self.rpn_conv.skip_quant = True
+
+ def forward(self, feats):
+ rpn_feats = []
+ for feat in feats:
+ rpn_feats.append(F.relu(self.rpn_conv(feat)))
+ return rpn_feats
+
+
+@register
+class RPNHead(nn.Layer):
+ """
+ Region Proposal Network
+
+ Args:
+ anchor_generator (dict): configure of anchor generation
+ rpn_target_assign (dict): configure of rpn targets assignment
+ train_proposal (dict): configure of proposals generation
+ at the stage of training
+ test_proposal (dict): configure of proposals generation
+ at the stage of prediction
+ in_channel (int): channel of input feature maps which can be
+ derived by from_config
+ """
+
+ def __init__(self,
+ anchor_generator=AnchorGenerator().__dict__,
+ rpn_target_assign=RPNTargetAssign().__dict__,
+ train_proposal=ProposalGenerator(12000, 2000).__dict__,
+ test_proposal=ProposalGenerator().__dict__,
+ in_channel=1024):
+ super(RPNHead, self).__init__()
+ self.anchor_generator = anchor_generator
+ self.rpn_target_assign = rpn_target_assign
+ self.train_proposal = train_proposal
+ self.test_proposal = test_proposal
+ if isinstance(anchor_generator, dict):
+ self.anchor_generator = AnchorGenerator(**anchor_generator)
+ if isinstance(rpn_target_assign, dict):
+ self.rpn_target_assign = RPNTargetAssign(**rpn_target_assign)
+ if isinstance(train_proposal, dict):
+ self.train_proposal = ProposalGenerator(**train_proposal)
+ if isinstance(test_proposal, dict):
+ self.test_proposal = ProposalGenerator(**test_proposal)
+
+ num_anchors = self.anchor_generator.num_anchors
+ self.rpn_feat = RPNFeat(in_channel, in_channel)
+ # rpn head is shared with each level
+ # rpn roi classification scores
+ self.rpn_rois_score = nn.Conv2D(
+ in_channels=in_channel,
+ out_channels=num_anchors,
+ kernel_size=1,
+ padding=0,
+ weight_attr=paddle.ParamAttr(initializer=Normal(
+ mean=0., std=0.01)))
+ self.rpn_rois_score.skip_quant = True
+
+ # rpn roi bbox regression deltas
+ self.rpn_rois_delta = nn.Conv2D(
+ in_channels=in_channel,
+ out_channels=4 * num_anchors,
+ kernel_size=1,
+ padding=0,
+ weight_attr=paddle.ParamAttr(initializer=Normal(
+ mean=0., std=0.01)))
+ self.rpn_rois_delta.skip_quant = True
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ # FPN share same rpn head
+ if isinstance(input_shape, (list, tuple)):
+ input_shape = input_shape[0]
+ return {'in_channel': input_shape.channels}
+
+ def forward(self, feats, inputs):
+ rpn_feats = self.rpn_feat(feats)
+ scores = []
+ deltas = []
+
+ for rpn_feat in rpn_feats:
+ rrs = self.rpn_rois_score(rpn_feat)
+ rrd = self.rpn_rois_delta(rpn_feat)
+ scores.append(rrs)
+ deltas.append(rrd)
+
+ anchors = self.anchor_generator(rpn_feats)
+
+ rois, rois_num = self._gen_proposal(scores, deltas, anchors, inputs)
+ if self.training:
+ loss = self.get_loss(scores, deltas, anchors, inputs)
+ return rois, rois_num, loss
+ else:
+ return rois, rois_num, None
+
+ def _gen_proposal(self, scores, bbox_deltas, anchors, inputs):
+ """
+ scores (list[Tensor]): Multi-level scores prediction
+ bbox_deltas (list[Tensor]): Multi-level deltas prediction
+ anchors (list[Tensor]): Multi-level anchors
+ inputs (dict): ground truth info
+ """
+ prop_gen = self.train_proposal if self.training else self.test_proposal
+ im_shape = inputs['im_shape']
+
+ # Collect multi-level proposals for each batch
+ # Get 'topk' of them as final output
+ bs_rois_collect = []
+ bs_rois_num_collect = []
+ batch_size = paddle.slice(paddle.shape(im_shape), [0], [0], [1])
+
+        # Generate proposals for each level and each image.
+        # Batch computation is avoided so that bboxes are not sorted across
+        # different images.
+ for i in range(batch_size):
+ rpn_rois_list = []
+ rpn_prob_list = []
+ rpn_rois_num_list = []
+
+ for rpn_score, rpn_delta, anchor in zip(scores, bbox_deltas,
+ anchors):
+ rpn_rois, rpn_rois_prob, rpn_rois_num, post_nms_top_n = prop_gen(
+ scores=rpn_score[i:i + 1],
+ bbox_deltas=rpn_delta[i:i + 1],
+ anchors=anchor,
+ im_shape=im_shape[i:i + 1])
+ if rpn_rois.shape[0] > 0:
+ rpn_rois_list.append(rpn_rois)
+ rpn_prob_list.append(rpn_rois_prob)
+ rpn_rois_num_list.append(rpn_rois_num)
+
+ if len(scores) > 1:
+ rpn_rois = paddle.concat(rpn_rois_list)
+ rpn_prob = paddle.concat(rpn_prob_list).flatten()
+
+ if rpn_prob.shape[0] > post_nms_top_n:
+ topk_prob, topk_inds = paddle.topk(rpn_prob, post_nms_top_n)
+ topk_rois = paddle.gather(rpn_rois, topk_inds)
+ else:
+ topk_rois = rpn_rois
+ topk_prob = rpn_prob
+ else:
+ topk_rois = rpn_rois_list[0]
+ topk_prob = rpn_prob_list[0].flatten()
+
+ bs_rois_collect.append(topk_rois)
+ bs_rois_num_collect.append(paddle.shape(topk_rois)[0])
+
+ bs_rois_num_collect = paddle.concat(bs_rois_num_collect)
+
+ return bs_rois_collect, bs_rois_num_collect
+
+ def get_loss(self, pred_scores, pred_deltas, anchors, inputs):
+ """
+ pred_scores (list[Tensor]): Multi-level scores prediction
+ pred_deltas (list[Tensor]): Multi-level deltas prediction
+ anchors (list[Tensor]): Multi-level anchors
+ inputs (dict): ground truth info, including im, gt_bbox, gt_score
+ """
+ anchors = [paddle.reshape(a, shape=(-1, 4)) for a in anchors]
+ anchors = paddle.concat(anchors)
+
+ scores = [
+ paddle.reshape(
+ paddle.transpose(
+ v, perm=[0, 2, 3, 1]),
+ shape=(v.shape[0], -1, 1)) for v in pred_scores
+ ]
+ scores = paddle.concat(scores, axis=1)
+
+ deltas = [
+ paddle.reshape(
+ paddle.transpose(
+ v, perm=[0, 2, 3, 1]),
+ shape=(v.shape[0], -1, 4)) for v in pred_deltas
+ ]
+ deltas = paddle.concat(deltas, axis=1)
+
+ score_tgt, bbox_tgt, loc_tgt, norm = self.rpn_target_assign(inputs,
+ anchors)
+
+ scores = paddle.reshape(x=scores, shape=(-1, ))
+ deltas = paddle.reshape(x=deltas, shape=(-1, 4))
+
+ score_tgt = paddle.concat(score_tgt)
+ score_tgt.stop_gradient = True
+
+ pos_mask = score_tgt == 1
+ pos_ind = paddle.nonzero(pos_mask)
+
+ valid_mask = score_tgt >= 0
+ valid_ind = paddle.nonzero(valid_mask)
+
+ # cls loss
+ if valid_ind.shape[0] == 0:
+ loss_rpn_cls = paddle.zeros([1], dtype='float32')
+ else:
+ score_pred = paddle.gather(scores, valid_ind)
+ score_label = paddle.gather(score_tgt, valid_ind).cast('float32')
+ score_label.stop_gradient = True
+ loss_rpn_cls = F.binary_cross_entropy_with_logits(
+ logit=score_pred, label=score_label, reduction="sum")
+
+ # reg loss
+ if pos_ind.shape[0] == 0:
+ loss_rpn_reg = paddle.zeros([1], dtype='float32')
+ else:
+ loc_pred = paddle.gather(deltas, pos_ind)
+ loc_tgt = paddle.concat(loc_tgt)
+ loc_tgt = paddle.gather(loc_tgt, pos_ind)
+ loc_tgt.stop_gradient = True
+ loss_rpn_reg = paddle.abs(loc_pred - loc_tgt).sum()
+ return {
+ 'loss_rpn_cls': loss_rpn_cls / norm,
+ 'loss_rpn_reg': loss_rpn_reg / norm
+ }
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/target.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/target.py
new file mode 100644
index 000000000..af83cfdb8
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/target.py
@@ -0,0 +1,675 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import paddle
+from ..bbox_utils import bbox2delta, bbox_overlaps
+
+
+def rpn_anchor_target(anchors,
+ gt_boxes,
+ rpn_batch_size_per_im,
+ rpn_positive_overlap,
+ rpn_negative_overlap,
+ rpn_fg_fraction,
+ use_random=True,
+ batch_size=1,
+ ignore_thresh=-1,
+ is_crowd=None,
+ weights=[1., 1., 1., 1.],
+ assign_on_cpu=False):
+ tgt_labels = []
+ tgt_bboxes = []
+ tgt_deltas = []
+ for i in range(batch_size):
+ gt_bbox = gt_boxes[i]
+ is_crowd_i = is_crowd[i] if is_crowd else None
+ # Step1: match anchor and gt_bbox
+ matches, match_labels = label_box(
+ anchors, gt_bbox, rpn_positive_overlap, rpn_negative_overlap, True,
+ ignore_thresh, is_crowd_i, assign_on_cpu)
+ # Step2: sample anchor
+ fg_inds, bg_inds = subsample_labels(match_labels, rpn_batch_size_per_im,
+ rpn_fg_fraction, 0, use_random)
+ # Fill with the ignore label (-1), then set positive and negative labels
+ labels = paddle.full(match_labels.shape, -1, dtype='int32')
+ if bg_inds.shape[0] > 0:
+ labels = paddle.scatter(labels, bg_inds, paddle.zeros_like(bg_inds))
+ if fg_inds.shape[0] > 0:
+ labels = paddle.scatter(labels, fg_inds, paddle.ones_like(fg_inds))
+ # Step3: make output
+ if gt_bbox.shape[0] == 0:
+ matched_gt_boxes = paddle.zeros([0, 4])
+ tgt_delta = paddle.zeros([0, 4])
+ else:
+ matched_gt_boxes = paddle.gather(gt_bbox, matches)
+ tgt_delta = bbox2delta(anchors, matched_gt_boxes, weights)
+ matched_gt_boxes.stop_gradient = True
+ tgt_delta.stop_gradient = True
+ labels.stop_gradient = True
+ tgt_labels.append(labels)
+ tgt_bboxes.append(matched_gt_boxes)
+ tgt_deltas.append(tgt_delta)
+
+ return tgt_labels, tgt_bboxes, tgt_deltas
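+
+# Example (illustrative sketch): assigning RPN targets with the usual
+# Faster R-CNN settings (IoU >= 0.7 positive, IoU < 0.3 negative,
+# 256 samples per image with a 0.5 foreground fraction):
+#
+#   tgt_labels, tgt_bboxes, tgt_deltas = rpn_anchor_target(
+#       anchors, gt_boxes, rpn_batch_size_per_im=256,
+#       rpn_positive_overlap=0.7, rpn_negative_overlap=0.3,
+#       rpn_fg_fraction=0.5, batch_size=len(gt_boxes))
+#   # tgt_labels[i]: 1 = positive, 0 = negative, -1 = ignored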
+
+
+def label_box(anchors,
+ gt_boxes,
+ positive_overlap,
+ negative_overlap,
+ allow_low_quality,
+ ignore_thresh,
+ is_crowd=None,
+ assign_on_cpu=False):
+ if assign_on_cpu:
+ paddle.set_device("cpu")
+ iou = bbox_overlaps(gt_boxes, anchors)
+ paddle.set_device("gpu")
+ else:
+ iou = bbox_overlaps(gt_boxes, anchors)
+ n_gt = gt_boxes.shape[0]
+ if n_gt == 0 or is_crowd is None:
+ n_gt_crowd = 0
+ else:
+ n_gt_crowd = paddle.nonzero(is_crowd).shape[0]
+ if iou.shape[0] == 0 or n_gt_crowd == n_gt:
+ # No truth, assign everything to background
+ default_matches = paddle.full((iou.shape[1], ), 0, dtype='int64')
+ default_match_labels = paddle.full((iou.shape[1], ), 0, dtype='int32')
+ return default_matches, default_match_labels
+    # if ignore_thresh > 0, remove an anchor if it is close to
+    # one of the crowded ground-truths
+ if n_gt_crowd > 0:
+ N_a = anchors.shape[0]
+ ones = paddle.ones([N_a])
+ mask = is_crowd * ones
+
+ if ignore_thresh > 0:
+ crowd_iou = iou * mask
+ valid = (paddle.sum((crowd_iou > ignore_thresh).cast('int32'),
+ axis=0) > 0).cast('float32')
+ iou = iou * (1 - valid) - valid
+
+ # ignore the iou between anchor and crowded ground-truth
+ iou = iou * (1 - mask) - mask
+
+ matched_vals, matches = paddle.topk(iou, k=1, axis=0)
+ match_labels = paddle.full(matches.shape, -1, dtype='int32')
+ # set ignored anchor with iou = -1
+ neg_cond = paddle.logical_and(matched_vals > -1,
+ matched_vals < negative_overlap)
+ match_labels = paddle.where(neg_cond,
+ paddle.zeros_like(match_labels), match_labels)
+ match_labels = paddle.where(matched_vals >= positive_overlap,
+ paddle.ones_like(match_labels), match_labels)
+ if allow_low_quality:
+ highest_quality_foreach_gt = iou.max(axis=1, keepdim=True)
+ pred_inds_with_highest_quality = paddle.logical_and(
+ iou > 0, iou == highest_quality_foreach_gt).cast('int32').sum(
+ 0, keepdim=True)
+ match_labels = paddle.where(pred_inds_with_highest_quality > 0,
+ paddle.ones_like(match_labels),
+ match_labels)
+
+ matches = matches.flatten()
+ match_labels = match_labels.flatten()
+
+ return matches, match_labels
+
+
+def subsample_labels(labels,
+ num_samples,
+ fg_fraction,
+ bg_label=0,
+ use_random=True):
+ positive = paddle.nonzero(
+ paddle.logical_and(labels != -1, labels != bg_label))
+ negative = paddle.nonzero(labels == bg_label)
+
+ fg_num = int(num_samples * fg_fraction)
+ fg_num = min(positive.numel(), fg_num)
+ bg_num = num_samples - fg_num
+ bg_num = min(negative.numel(), bg_num)
+ if fg_num == 0 and bg_num == 0:
+ fg_inds = paddle.zeros([0], dtype='int32')
+ bg_inds = paddle.zeros([0], dtype='int32')
+ return fg_inds, bg_inds
+
+ # randomly select positive and negative examples
+
+ negative = negative.cast('int32').flatten()
+ bg_perm = paddle.randperm(negative.numel(), dtype='int32')
+ bg_perm = paddle.slice(bg_perm, axes=[0], starts=[0], ends=[bg_num])
+ if use_random:
+ bg_inds = paddle.gather(negative, bg_perm)
+ else:
+ bg_inds = paddle.slice(negative, axes=[0], starts=[0], ends=[bg_num])
+ if fg_num == 0:
+ fg_inds = paddle.zeros([0], dtype='int32')
+ return fg_inds, bg_inds
+
+ positive = positive.cast('int32').flatten()
+ fg_perm = paddle.randperm(positive.numel(), dtype='int32')
+ fg_perm = paddle.slice(fg_perm, axes=[0], starts=[0], ends=[fg_num])
+ if use_random:
+ fg_inds = paddle.gather(positive, fg_perm)
+ else:
+ fg_inds = paddle.slice(positive, axes=[0], starts=[0], ends=[fg_num])
+
+ return fg_inds, bg_inds
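+
+# Example (illustrative sketch): sampling 4 labels with a 50% foreground
+# fraction keeps at most 2 positives (labels not in {-1, bg_label}) and
+# fills the rest with negatives (label == bg_label).
+#
+#   labels = paddle.to_tensor([1, 1, 1, 0, 0, -1], dtype='int32')
+#   fg_inds, bg_inds = subsample_labels(labels, num_samples=4,
+#                                       fg_fraction=0.5, bg_label=0)
+#   # fg_inds holds 2 of indices {0, 1, 2}; bg_inds holds 2 of {3, 4}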
+
+
+def generate_proposal_target(rpn_rois,
+ gt_classes,
+ gt_boxes,
+ batch_size_per_im,
+ fg_fraction,
+ fg_thresh,
+ bg_thresh,
+ num_classes,
+ ignore_thresh=-1.,
+ is_crowd=None,
+ use_random=True,
+ is_cascade=False,
+ cascade_iou=0.5,
+ assign_on_cpu=False):
+
+ rois_with_gt = []
+ tgt_labels = []
+ tgt_bboxes = []
+ tgt_gt_inds = []
+ new_rois_num = []
+
+    # In cascade rcnn, the thresholds for foreground and background
+    # are both taken from cascade_iou
+ fg_thresh = cascade_iou if is_cascade else fg_thresh
+ bg_thresh = cascade_iou if is_cascade else bg_thresh
+ for i, rpn_roi in enumerate(rpn_rois):
+ gt_bbox = gt_boxes[i]
+ is_crowd_i = is_crowd[i] if is_crowd else None
+ gt_class = paddle.squeeze(gt_classes[i], axis=-1)
+
+        # Concat RoIs and gt boxes, except in Cascade RCNN or when there is no gt
+ if not is_cascade and gt_bbox.shape[0] > 0:
+ bbox = paddle.concat([rpn_roi, gt_bbox])
+ else:
+ bbox = rpn_roi
+
+ # Step1: label bbox
+ matches, match_labels = label_box(bbox, gt_bbox, fg_thresh, bg_thresh,
+ False, ignore_thresh, is_crowd_i,
+ assign_on_cpu)
+ # Step2: sample bbox
+ sampled_inds, sampled_gt_classes = sample_bbox(
+ matches, match_labels, gt_class, batch_size_per_im, fg_fraction,
+ num_classes, use_random, is_cascade)
+
+ # Step3: make output
+ rois_per_image = bbox if is_cascade else paddle.gather(bbox,
+ sampled_inds)
+ sampled_gt_ind = matches if is_cascade else paddle.gather(matches,
+ sampled_inds)
+ if gt_bbox.shape[0] > 0:
+ sampled_bbox = paddle.gather(gt_bbox, sampled_gt_ind)
+ else:
+ num = rois_per_image.shape[0]
+ sampled_bbox = paddle.zeros([num, 4], dtype='float32')
+
+ rois_per_image.stop_gradient = True
+ sampled_gt_ind.stop_gradient = True
+ sampled_bbox.stop_gradient = True
+ tgt_labels.append(sampled_gt_classes)
+ tgt_bboxes.append(sampled_bbox)
+ rois_with_gt.append(rois_per_image)
+ tgt_gt_inds.append(sampled_gt_ind)
+ new_rois_num.append(paddle.shape(sampled_inds)[0])
+ new_rois_num = paddle.concat(new_rois_num)
+ return rois_with_gt, tgt_labels, tgt_bboxes, tgt_gt_inds, new_rois_num
+
+
+def sample_bbox(matches,
+ match_labels,
+ gt_classes,
+ batch_size_per_im,
+ fg_fraction,
+ num_classes,
+ use_random=True,
+ is_cascade=False):
+
+ n_gt = gt_classes.shape[0]
+ if n_gt == 0:
+ # No truth, assign everything to background
+ gt_classes = paddle.ones(matches.shape, dtype='int32') * num_classes
+ else:
+ gt_classes = paddle.gather(gt_classes, matches)
+ gt_classes = paddle.where(match_labels == 0,
+ paddle.ones_like(gt_classes) * num_classes,
+ gt_classes)
+ gt_classes = paddle.where(match_labels == -1,
+ paddle.ones_like(gt_classes) * -1, gt_classes)
+ if is_cascade:
+ index = paddle.arange(matches.shape[0])
+ return index, gt_classes
+ rois_per_image = int(batch_size_per_im)
+
+ fg_inds, bg_inds = subsample_labels(gt_classes, rois_per_image, fg_fraction,
+ num_classes, use_random)
+ if fg_inds.shape[0] == 0 and bg_inds.shape[0] == 0:
+ # fake output labeled with -1 when all boxes are neither
+ # foreground nor background
+ sampled_inds = paddle.zeros([1], dtype='int32')
+ else:
+ sampled_inds = paddle.concat([fg_inds, bg_inds])
+ sampled_gt_classes = paddle.gather(gt_classes, sampled_inds)
+ return sampled_inds, sampled_gt_classes
+
+
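+# Illustrative sketch, not from the original code: driving
+# generate_proposal_target for a single image with fake data; shapes and
+# value ranges below are assumptions for the example only.
+def _demo_generate_proposal_target():
+    rpn_rois = [paddle.rand([50, 4]) * 100.]      # 50 proposals in xyxy
+    gt_classes = [paddle.randint(0, 80, [3, 1])]  # 3 gt boxes, class ids
+    gt_boxes = [paddle.rand([3, 4]) * 100.]
+    rois, labels, bboxes, gt_inds, rois_num = generate_proposal_target(
+        rpn_rois, gt_classes, gt_boxes, batch_size_per_im=512,
+        fg_fraction=0.25, fg_thresh=0.5, bg_thresh=0.5, num_classes=80)
+    # rois/labels/bboxes are per-image lists; rois_num holds the number
+    # of sampled RoIs of each image
+    return rois, labels, bboxes, gt_inds, rois_num
+
+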
+def polygons_to_mask(polygons, height, width):
+ """
+ Convert the polygons to mask format
+
+ Args:
+ polygons (list[ndarray]): each array has shape (Nx2,)
+ height (int): mask height
+ width (int): mask width
+ Returns:
+ ndarray: a bool mask of shape (height, width)
+ """
+ import pycocotools.mask as mask_util
+ assert len(polygons) > 0, "COCOAPI does not support empty polygons"
+ rles = mask_util.frPyObjects(polygons, height, width)
+ rle = mask_util.merge(rles)
+    # use the builtin bool: np.bool was deprecated and removed in recent NumPy
+    return mask_util.decode(rle).astype(bool)
+
+
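+# Illustrative sketch, not from the original code: converting a single
+# square polygon to a binary mask, assuming pycocotools is installed.
+def _demo_polygons_to_mask():
+    square = np.array([10., 10., 50., 10., 50., 50., 10., 50.])
+    mask = polygons_to_mask([square], height=64, width=64)
+    # mask is a (64, 64) bool array, True inside the square
+    return mask
+
+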
+def rasterize_polygons_within_box(poly, box, resolution):
+ w, h = box[2] - box[0], box[3] - box[1]
+ polygons = [np.asarray(p, dtype=np.float64) for p in poly]
+ for p in polygons:
+ p[0::2] = p[0::2] - box[0]
+ p[1::2] = p[1::2] - box[1]
+
+ ratio_h = resolution / max(h, 0.1)
+ ratio_w = resolution / max(w, 0.1)
+
+ if ratio_h == ratio_w:
+ for p in polygons:
+ p *= ratio_h
+ else:
+ for p in polygons:
+ p[0::2] *= ratio_w
+ p[1::2] *= ratio_h
+
+    # Rasterize the polygons with the COCO API
+ mask = polygons_to_mask(polygons, resolution, resolution)
+ mask = paddle.to_tensor(mask, dtype='int32')
+ return mask
+
+
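+# Illustrative sketch, not from the original code: cropping a polygon to
+# its box and rasterizing it at a fixed mask resolution (values assumed).
+def _demo_rasterize_polygons_within_box():
+    poly = [np.array([12., 12., 44., 12., 44., 44., 12., 44.])]
+    box = np.array([10., 10., 50., 50.])  # xyxy
+    mask = rasterize_polygons_within_box(poly, box, resolution=28)
+    # mask is a 28 x 28 int32 tensor
+    return mask
+
+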
+def generate_mask_target(gt_segms, rois, labels_int32, sampled_gt_inds,
+ num_classes, resolution):
+ mask_rois = []
+ mask_rois_num = []
+ tgt_masks = []
+ tgt_classes = []
+ mask_index = []
+ tgt_weights = []
+ for k in range(len(rois)):
+ labels_per_im = labels_int32[k]
+ # select rois labeled with foreground
+ fg_inds = paddle.nonzero(
+ paddle.logical_and(labels_per_im != -1, labels_per_im !=
+ num_classes))
+ has_fg = True
+ # generate fake roi if foreground is empty
+ if fg_inds.numel() == 0:
+ has_fg = False
+ fg_inds = paddle.ones([1], dtype='int32')
+ inds_per_im = sampled_gt_inds[k]
+ inds_per_im = paddle.gather(inds_per_im, fg_inds)
+
+ rois_per_im = rois[k]
+ fg_rois = paddle.gather(rois_per_im, fg_inds)
+ # Copy the foreground roi to cpu
+ # to generate mask target with ground-truth
+ boxes = fg_rois.numpy()
+ gt_segms_per_im = gt_segms[k]
+
+ new_segm = []
+ inds_per_im = inds_per_im.numpy()
+ if len(gt_segms_per_im) > 0:
+ for i in inds_per_im:
+ new_segm.append(gt_segms_per_im[i])
+ fg_inds_new = fg_inds.reshape([-1]).numpy()
+ results = []
+ if len(gt_segms_per_im) > 0:
+ for j in fg_inds_new:
+ results.append(
+ rasterize_polygons_within_box(new_segm[j], boxes[j],
+ resolution))
+ else:
+ results.append(paddle.ones([resolution, resolution], dtype='int32'))
+
+ fg_classes = paddle.gather(labels_per_im, fg_inds)
+ weight = paddle.ones([fg_rois.shape[0]], dtype='float32')
+ if not has_fg:
+ # now all sampled classes are background
+ # which will cause error in loss calculation,
+ # make fake classes with weight of 0.
+ fg_classes = paddle.zeros([1], dtype='int32')
+ weight = weight - 1
+ tgt_mask = paddle.stack(results)
+ tgt_mask.stop_gradient = True
+ fg_rois.stop_gradient = True
+
+ mask_index.append(fg_inds)
+ mask_rois.append(fg_rois)
+ mask_rois_num.append(paddle.shape(fg_rois)[0])
+ tgt_classes.append(fg_classes)
+ tgt_masks.append(tgt_mask)
+ tgt_weights.append(weight)
+
+ mask_index = paddle.concat(mask_index)
+ mask_rois_num = paddle.concat(mask_rois_num)
+ tgt_classes = paddle.concat(tgt_classes, axis=0)
+ tgt_masks = paddle.concat(tgt_masks, axis=0)
+ tgt_weights = paddle.concat(tgt_weights, axis=0)
+
+ return mask_rois, mask_rois_num, tgt_classes, tgt_masks, mask_index, tgt_weights
+
+
+def libra_sample_pos(max_overlaps, max_classes, pos_inds, num_expected):
+ if len(pos_inds) <= num_expected:
+ return pos_inds
+ else:
+ unique_gt_inds = np.unique(max_classes[pos_inds])
+ num_gts = len(unique_gt_inds)
+ num_per_gt = int(round(num_expected / float(num_gts)) + 1)
+
+ sampled_inds = []
+ for i in unique_gt_inds:
+ inds = np.nonzero(max_classes == i)[0]
+ before_len = len(inds)
+ inds = list(set(inds) & set(pos_inds))
+ after_len = len(inds)
+ if len(inds) > num_per_gt:
+ inds = np.random.choice(inds, size=num_per_gt, replace=False)
+ sampled_inds.extend(list(inds)) # combine as a new sampler
+ if len(sampled_inds) < num_expected:
+ num_extra = num_expected - len(sampled_inds)
+ extra_inds = np.array(list(set(pos_inds) - set(sampled_inds)))
+            assert len(sampled_inds) + len(extra_inds) == len(pos_inds), \
+                "lengths of sampled_inds({}) and extra_inds({}) must sum to the length of pos_inds({})!".format(
+                len(sampled_inds), len(extra_inds), len(pos_inds))
+ if len(extra_inds) > num_extra:
+ extra_inds = np.random.choice(
+ extra_inds, size=num_extra, replace=False)
+ sampled_inds.extend(extra_inds.tolist())
+ elif len(sampled_inds) > num_expected:
+ sampled_inds = np.random.choice(
+ sampled_inds, size=num_expected, replace=False)
+ return paddle.to_tensor(sampled_inds)
+
+
+def libra_sample_via_interval(max_overlaps, full_set, num_expected, floor_thr,
+ num_bins, bg_thresh):
+ max_iou = max_overlaps.max()
+ iou_interval = (max_iou - floor_thr) / num_bins
+ per_num_expected = int(num_expected / num_bins)
+
+ sampled_inds = []
+ for i in range(num_bins):
+ start_iou = floor_thr + i * iou_interval
+ end_iou = floor_thr + (i + 1) * iou_interval
+
+ tmp_set = set(
+ np.where(
+ np.logical_and(max_overlaps >= start_iou, max_overlaps <
+ end_iou))[0])
+ tmp_inds = list(tmp_set & full_set)
+
+ if len(tmp_inds) > per_num_expected:
+ tmp_sampled_set = np.random.choice(
+ tmp_inds, size=per_num_expected, replace=False)
+ else:
+            tmp_sampled_set = np.array(tmp_inds, dtype=np.int64)
+ sampled_inds.append(tmp_sampled_set)
+
+ sampled_inds = np.concatenate(sampled_inds)
+ if len(sampled_inds) < num_expected:
+ num_extra = num_expected - len(sampled_inds)
+ extra_inds = np.array(list(full_set - set(sampled_inds)))
+        assert len(sampled_inds) + len(extra_inds) == len(full_set), \
+            "lengths of sampled_inds({}) and extra_inds({}) must sum to the size of full_set({})!".format(
+                len(sampled_inds), len(extra_inds), len(full_set))
+
+ if len(extra_inds) > num_extra:
+ extra_inds = np.random.choice(extra_inds, num_extra, replace=False)
+ sampled_inds = np.concatenate([sampled_inds, extra_inds])
+
+ return sampled_inds
+
+
+def libra_sample_neg(max_overlaps,
+ max_classes,
+ neg_inds,
+ num_expected,
+ floor_thr=-1,
+ floor_fraction=0,
+ num_bins=3,
+ bg_thresh=0.5):
+ if len(neg_inds) <= num_expected:
+ return neg_inds
+ else:
+ # balance sampling for negative samples
+ neg_set = set(neg_inds.tolist())
+ if floor_thr > 0:
+ floor_set = set(
+ np.where(
+ np.logical_and(max_overlaps >= 0, max_overlaps < floor_thr))
+ [0])
+ iou_sampling_set = set(np.where(max_overlaps >= floor_thr)[0])
+ elif floor_thr == 0:
+ floor_set = set(np.where(max_overlaps == 0)[0])
+ iou_sampling_set = set(np.where(max_overlaps > floor_thr)[0])
+ else:
+ floor_set = set()
+ iou_sampling_set = set(np.where(max_overlaps > floor_thr)[0])
+ floor_thr = 0
+
+ floor_neg_inds = list(floor_set & neg_set)
+ iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
+
+ num_expected_iou_sampling = int(num_expected * (1 - floor_fraction))
+ if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
+ if num_bins >= 2:
+ iou_sampled_inds = libra_sample_via_interval(
+ max_overlaps,
+ set(iou_sampling_neg_inds), num_expected_iou_sampling,
+ floor_thr, num_bins, bg_thresh)
+ else:
+ iou_sampled_inds = np.random.choice(
+ iou_sampling_neg_inds,
+ size=num_expected_iou_sampling,
+ replace=False)
+ else:
+            iou_sampled_inds = np.array(iou_sampling_neg_inds, dtype=np.int64)
+ num_expected_floor = num_expected - len(iou_sampled_inds)
+ if len(floor_neg_inds) > num_expected_floor:
+ sampled_floor_inds = np.random.choice(
+ floor_neg_inds, size=num_expected_floor, replace=False)
+ else:
+            sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int64)
+ sampled_inds = np.concatenate((sampled_floor_inds, iou_sampled_inds))
+ if len(sampled_inds) < num_expected:
+ num_extra = num_expected - len(sampled_inds)
+ extra_inds = np.array(list(neg_set - set(sampled_inds)))
+ if len(extra_inds) > num_extra:
+ extra_inds = np.random.choice(
+ extra_inds, size=num_extra, replace=False)
+ sampled_inds = np.concatenate((sampled_inds, extra_inds))
+ return paddle.to_tensor(sampled_inds)
+
+
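+# Illustrative sketch, not from the original code: IoU-balanced negative
+# sampling over fake overlaps; the arrays below are assumptions for the
+# example only.
+def _demo_libra_sample_neg():
+    max_overlaps = np.random.uniform(0., 0.5, size=200)
+    max_classes = np.zeros(200, dtype=np.int64)
+    neg_inds = np.arange(200)
+    sampled = libra_sample_neg(
+        max_overlaps, max_classes, neg_inds, num_expected=64,
+        num_bins=3, bg_thresh=0.5)
+    # sampled holds 64 negative indices spread across the IoU bins
+    return sampled
+
+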
+def libra_label_box(anchors, gt_boxes, gt_classes, positive_overlap,
+ negative_overlap, num_classes):
+ # TODO: use paddle API to speed up
+ gt_classes = gt_classes.numpy()
+ gt_overlaps = np.zeros((anchors.shape[0], num_classes))
+ matches = np.zeros((anchors.shape[0]), dtype=np.int32)
+ if len(gt_boxes) > 0:
+ proposal_to_gt_overlaps = bbox_overlaps(anchors, gt_boxes).numpy()
+ overlaps_argmax = proposal_to_gt_overlaps.argmax(axis=1)
+ overlaps_max = proposal_to_gt_overlaps.max(axis=1)
+        # Boxes with non-zero overlap with gt boxes
+ overlapped_boxes_ind = np.where(overlaps_max > 0)[0]
+ overlapped_boxes_gt_classes = gt_classes[overlaps_argmax[
+ overlapped_boxes_ind]]
+
+ for idx in range(len(overlapped_boxes_ind)):
+ gt_overlaps[overlapped_boxes_ind[idx], overlapped_boxes_gt_classes[
+ idx]] = overlaps_max[overlapped_boxes_ind[idx]]
+ matches[overlapped_boxes_ind[idx]] = overlaps_argmax[
+ overlapped_boxes_ind[idx]]
+
+ gt_overlaps = paddle.to_tensor(gt_overlaps)
+ matches = paddle.to_tensor(matches)
+
+ matched_vals = paddle.max(gt_overlaps, axis=1)
+ match_labels = paddle.full(matches.shape, -1, dtype='int32')
+ match_labels = paddle.where(matched_vals < negative_overlap,
+ paddle.zeros_like(match_labels), match_labels)
+ match_labels = paddle.where(matched_vals >= positive_overlap,
+ paddle.ones_like(match_labels), match_labels)
+
+ return matches, match_labels, matched_vals
+
+
+def libra_sample_bbox(matches,
+ match_labels,
+ matched_vals,
+ gt_classes,
+ batch_size_per_im,
+ num_classes,
+ fg_fraction,
+ fg_thresh,
+ bg_thresh,
+ num_bins,
+ use_random=True,
+ is_cascade_rcnn=False):
+ rois_per_image = int(batch_size_per_im)
+ fg_rois_per_im = int(np.round(fg_fraction * rois_per_image))
+ bg_rois_per_im = rois_per_image - fg_rois_per_im
+
+ if is_cascade_rcnn:
+ fg_inds = paddle.nonzero(matched_vals >= fg_thresh)
+ bg_inds = paddle.nonzero(matched_vals < bg_thresh)
+ else:
+ matched_vals_np = matched_vals.numpy()
+ match_labels_np = match_labels.numpy()
+
+ # sample fg
+ fg_inds = paddle.nonzero(matched_vals >= fg_thresh).flatten()
+ fg_nums = int(np.minimum(fg_rois_per_im, fg_inds.shape[0]))
+ if (fg_inds.shape[0] > fg_nums) and use_random:
+ fg_inds = libra_sample_pos(matched_vals_np, match_labels_np,
+ fg_inds.numpy(), fg_rois_per_im)
+ fg_inds = fg_inds[:fg_nums]
+
+ # sample bg
+ bg_inds = paddle.nonzero(matched_vals < bg_thresh).flatten()
+ bg_nums = int(np.minimum(rois_per_image - fg_nums, bg_inds.shape[0]))
+ if (bg_inds.shape[0] > bg_nums) and use_random:
+ bg_inds = libra_sample_neg(
+ matched_vals_np,
+ match_labels_np,
+ bg_inds.numpy(),
+ bg_rois_per_im,
+ num_bins=num_bins,
+ bg_thresh=bg_thresh)
+ bg_inds = bg_inds[:bg_nums]
+
+ sampled_inds = paddle.concat([fg_inds, bg_inds])
+
+ gt_classes = paddle.gather(gt_classes, matches)
+ gt_classes = paddle.where(match_labels == 0,
+ paddle.ones_like(gt_classes) * num_classes,
+ gt_classes)
+ gt_classes = paddle.where(match_labels == -1,
+ paddle.ones_like(gt_classes) * -1, gt_classes)
+ sampled_gt_classes = paddle.gather(gt_classes, sampled_inds)
+
+ return sampled_inds, sampled_gt_classes
+
+
+def libra_generate_proposal_target(rpn_rois,
+ gt_classes,
+ gt_boxes,
+ batch_size_per_im,
+ fg_fraction,
+ fg_thresh,
+ bg_thresh,
+ num_classes,
+ use_random=True,
+ is_cascade_rcnn=False,
+ max_overlaps=None,
+ num_bins=3):
+
+ rois_with_gt = []
+ tgt_labels = []
+ tgt_bboxes = []
+ sampled_max_overlaps = []
+ tgt_gt_inds = []
+ new_rois_num = []
+
+ for i, rpn_roi in enumerate(rpn_rois):
+ max_overlap = max_overlaps[i] if is_cascade_rcnn else None
+ gt_bbox = gt_boxes[i]
+ gt_class = paddle.squeeze(gt_classes[i], axis=-1)
+ if is_cascade_rcnn:
+ rpn_roi = filter_roi(rpn_roi, max_overlap)
+ bbox = paddle.concat([rpn_roi, gt_bbox])
+
+ # Step1: label bbox
+ matches, match_labels, matched_vals = libra_label_box(
+ bbox, gt_bbox, gt_class, fg_thresh, bg_thresh, num_classes)
+
+ # Step2: sample bbox
+ sampled_inds, sampled_gt_classes = libra_sample_bbox(
+ matches, match_labels, matched_vals, gt_class, batch_size_per_im,
+ num_classes, fg_fraction, fg_thresh, bg_thresh, num_bins,
+ use_random, is_cascade_rcnn)
+
+ # Step3: make output
+ rois_per_image = paddle.gather(bbox, sampled_inds)
+ sampled_gt_ind = paddle.gather(matches, sampled_inds)
+ sampled_bbox = paddle.gather(gt_bbox, sampled_gt_ind)
+ sampled_overlap = paddle.gather(matched_vals, sampled_inds)
+
+ rois_per_image.stop_gradient = True
+ sampled_gt_ind.stop_gradient = True
+ sampled_bbox.stop_gradient = True
+ sampled_overlap.stop_gradient = True
+
+ tgt_labels.append(sampled_gt_classes)
+ tgt_bboxes.append(sampled_bbox)
+ rois_with_gt.append(rois_per_image)
+ sampled_max_overlaps.append(sampled_overlap)
+ tgt_gt_inds.append(sampled_gt_ind)
+ new_rois_num.append(paddle.shape(sampled_inds)[0])
+ new_rois_num = paddle.concat(new_rois_num)
+ # rois_with_gt, tgt_labels, tgt_bboxes, tgt_gt_inds, new_rois_num
+ return rois_with_gt, tgt_labels, tgt_bboxes, tgt_gt_inds, new_rois_num
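+
+
+# Illustrative sketch, not from the original code: Libra proposal targets
+# for one image with fake data (shapes assumed for the example).
+def _demo_libra_generate_proposal_target():
+    rpn_rois = [paddle.rand([50, 4]) * 100.]
+    gt_classes = [paddle.randint(0, 80, [3, 1])]
+    gt_boxes = [paddle.rand([3, 4]) * 100.]
+    return libra_generate_proposal_target(
+        rpn_rois, gt_classes, gt_boxes, batch_size_per_im=512,
+        fg_fraction=0.25, fg_thresh=0.5, bg_thresh=0.5, num_classes=80)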
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/target_layer.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/target_layer.py
new file mode 100644
index 000000000..3b5a09601
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/proposal_generator/target_layer.py
@@ -0,0 +1,490 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import sys
+import paddle
+from ppdet.core.workspace import register, serializable
+
+from .target import rpn_anchor_target, generate_proposal_target, generate_mask_target, libra_generate_proposal_target
+import numpy as np
+
+
+@register
+@serializable
+class RPNTargetAssign(object):
+ __shared__ = ['assign_on_cpu']
+ """
+ RPN targets assignment module
+
+ The assignment consists of three steps:
+ 1. Match anchor and ground-truth box, label the anchor with foreground
+ or background sample
+    2. Sample anchors to keep the proper ratio between foreground and
+        background
+ 3. Generate the targets for classification and regression branch
+
+
+ Args:
+ batch_size_per_im (int): Total number of RPN samples per image.
+ default 256
+ fg_fraction (float): Fraction of anchors that is labeled
+ foreground, default 0.5
+ positive_overlap (float): Minimum overlap required between an anchor
+ and ground-truth box for the (anchor, gt box) pair to be
+ a foreground sample. default 0.7
+ negative_overlap (float): Maximum overlap allowed between an anchor
+ and ground-truth box for the (anchor, gt box) pair to be
+ a background sample. default 0.3
+    ignore_thresh (float): Threshold for ignoring the is_crowd ground-truth
+ if the value is larger than zero.
+ use_random (bool): Use random sampling to choose foreground and
+ background boxes, default true.
+ assign_on_cpu (bool): In case the number of gt box is too large,
+ compute IoU on CPU, default false.
+ """
+
+ def __init__(self,
+ batch_size_per_im=256,
+ fg_fraction=0.5,
+ positive_overlap=0.7,
+ negative_overlap=0.3,
+ ignore_thresh=-1.,
+ use_random=True,
+ assign_on_cpu=False):
+ super(RPNTargetAssign, self).__init__()
+ self.batch_size_per_im = batch_size_per_im
+ self.fg_fraction = fg_fraction
+ self.positive_overlap = positive_overlap
+ self.negative_overlap = negative_overlap
+ self.ignore_thresh = ignore_thresh
+ self.use_random = use_random
+ self.assign_on_cpu = assign_on_cpu
+
+ def __call__(self, inputs, anchors):
+ """
+ inputs: ground-truth instances.
+        anchors (Tensor): [num_anchors, 4], where num_anchors is the total
+            number of anchors across all feature maps.
+ """
+ gt_boxes = inputs['gt_bbox']
+ is_crowd = inputs.get('is_crowd', None)
+ batch_size = len(gt_boxes)
+ tgt_labels, tgt_bboxes, tgt_deltas = rpn_anchor_target(
+ anchors,
+ gt_boxes,
+ self.batch_size_per_im,
+ self.positive_overlap,
+ self.negative_overlap,
+ self.fg_fraction,
+ self.use_random,
+ batch_size,
+ self.ignore_thresh,
+ is_crowd,
+ assign_on_cpu=self.assign_on_cpu)
+ norm = self.batch_size_per_im * batch_size
+
+ return tgt_labels, tgt_bboxes, tgt_deltas, norm
+
+
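+# Illustrative sketch, not from the original code: assigning RPN targets
+# with default thresholds; anchor and gt shapes are assumptions only.
+def _demo_rpn_target_assign():
+    assigner = RPNTargetAssign(batch_size_per_im=256,
+                               positive_overlap=0.7,
+                               negative_overlap=0.3)
+    anchors = paddle.rand([1000, 4]) * 100.
+    inputs = {'gt_bbox': [paddle.rand([5, 4]) * 100.]}
+    tgt_labels, tgt_bboxes, tgt_deltas, norm = assigner(inputs, anchors)
+    # norm equals batch_size_per_im * batch_size; it normalizes RPN losses
+    return tgt_labels, tgt_bboxes, tgt_deltas, norm
+
+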
+@register
+class BBoxAssigner(object):
+ __shared__ = ['num_classes', 'assign_on_cpu']
+ """
+ RCNN targets assignment module
+
+ The assignment consists of three steps:
+ 1. Match RoIs and ground-truth box, label the RoIs with foreground
+ or background sample
+    2. Sample anchors to keep the proper ratio between foreground and
+        background
+ 3. Generate the targets for classification and regression branch
+
+ Args:
+ batch_size_per_im (int): Total number of RoIs per image.
+ default 512
+ fg_fraction (float): Fraction of RoIs that is labeled
+ foreground, default 0.25
+ fg_thresh (float): Minimum overlap required between a RoI
+ and ground-truth box for the (roi, gt box) pair to be
+ a foreground sample. default 0.5
+ bg_thresh (float): Maximum overlap allowed between a RoI
+ and ground-truth box for the (roi, gt box) pair to be
+ a background sample. default 0.5
+    ignore_thresh (float): Threshold for ignoring the is_crowd ground-truth
+ if the value is larger than zero.
+ use_random (bool): Use random sampling to choose foreground and
+ background boxes, default true
+    cascade_iou (list[iou]): The list of overlap thresholds used to select
+        foreground and background at each stage, which is only used in Cascade RCNN.
+    num_classes (int): The number of classes.
+ assign_on_cpu (bool): In case the number of gt box is too large,
+ compute IoU on CPU, default false.
+ """
+
+ def __init__(self,
+ batch_size_per_im=512,
+ fg_fraction=.25,
+ fg_thresh=.5,
+ bg_thresh=.5,
+ ignore_thresh=-1.,
+ use_random=True,
+ cascade_iou=[0.5, 0.6, 0.7],
+ num_classes=80,
+ assign_on_cpu=False):
+ super(BBoxAssigner, self).__init__()
+ self.batch_size_per_im = batch_size_per_im
+ self.fg_fraction = fg_fraction
+ self.fg_thresh = fg_thresh
+ self.bg_thresh = bg_thresh
+ self.ignore_thresh = ignore_thresh
+ self.use_random = use_random
+ self.cascade_iou = cascade_iou
+ self.num_classes = num_classes
+ self.assign_on_cpu = assign_on_cpu
+
+ def __call__(self,
+ rpn_rois,
+ rpn_rois_num,
+ inputs,
+ stage=0,
+ is_cascade=False):
+ gt_classes = inputs['gt_class']
+ gt_boxes = inputs['gt_bbox']
+ is_crowd = inputs.get('is_crowd', None)
+ # rois, tgt_labels, tgt_bboxes, tgt_gt_inds
+ # new_rois_num
+ outs = generate_proposal_target(
+ rpn_rois, gt_classes, gt_boxes, self.batch_size_per_im,
+ self.fg_fraction, self.fg_thresh, self.bg_thresh, self.num_classes,
+ self.ignore_thresh, is_crowd, self.use_random, is_cascade,
+ self.cascade_iou[stage], self.assign_on_cpu)
+ rois = outs[0]
+ rois_num = outs[-1]
+ # tgt_labels, tgt_bboxes, tgt_gt_inds
+ targets = outs[1:4]
+ return rois, rois_num, targets
+
+
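+# Illustrative sketch, not from the original code: sampling RCNN targets
+# from RPN proposals; all shapes below are assumptions for the example.
+def _demo_bbox_assigner():
+    assigner = BBoxAssigner(batch_size_per_im=512, num_classes=80)
+    rpn_rois = [paddle.rand([100, 4]) * 100.]
+    rpn_rois_num = paddle.to_tensor([100], dtype='int32')
+    inputs = {
+        'gt_class': [paddle.randint(0, 80, [4, 1])],
+        'gt_bbox': [paddle.rand([4, 4]) * 100.],
+    }
+    rois, rois_num, targets = assigner(rpn_rois, rpn_rois_num, inputs)
+    # targets = (tgt_labels, tgt_bboxes, tgt_gt_inds)
+    return rois, rois_num, targets
+
+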
+@register
+class BBoxLibraAssigner(object):
+ __shared__ = ['num_classes']
+ """
+ Libra-RCNN targets assignment module
+
+ The assignment consists of three steps:
+ 1. Match RoIs and ground-truth box, label the RoIs with foreground
+ or background sample
+    2. Sample anchors to keep the proper ratio between foreground and
+        background
+ 3. Generate the targets for classification and regression branch
+
+ Args:
+ batch_size_per_im (int): Total number of RoIs per image.
+ default 512
+ fg_fraction (float): Fraction of RoIs that is labeled
+ foreground, default 0.25
+ fg_thresh (float): Minimum overlap required between a RoI
+ and ground-truth box for the (roi, gt box) pair to be
+ a foreground sample. default 0.5
+ bg_thresh (float): Maximum overlap allowed between a RoI
+ and ground-truth box for the (roi, gt box) pair to be
+ a background sample. default 0.5
+ use_random (bool): Use random sampling to choose foreground and
+ background boxes, default true
+    cascade_iou (list[iou]): The list of overlap thresholds used to select
+        foreground and background at each stage, which is only used in Cascade RCNN.
+    num_classes (int): The number of classes.
+    num_bins (int): The number of IoU bins used by Libra sampling.
+ """
+
+ def __init__(self,
+ batch_size_per_im=512,
+ fg_fraction=.25,
+ fg_thresh=.5,
+ bg_thresh=.5,
+ use_random=True,
+ cascade_iou=[0.5, 0.6, 0.7],
+ num_classes=80,
+ num_bins=3):
+ super(BBoxLibraAssigner, self).__init__()
+ self.batch_size_per_im = batch_size_per_im
+ self.fg_fraction = fg_fraction
+ self.fg_thresh = fg_thresh
+ self.bg_thresh = bg_thresh
+ self.use_random = use_random
+ self.cascade_iou = cascade_iou
+ self.num_classes = num_classes
+ self.num_bins = num_bins
+
+ def __call__(self,
+ rpn_rois,
+ rpn_rois_num,
+ inputs,
+ stage=0,
+ is_cascade=False):
+ gt_classes = inputs['gt_class']
+ gt_boxes = inputs['gt_bbox']
+ # rois, tgt_labels, tgt_bboxes, tgt_gt_inds
+ outs = libra_generate_proposal_target(
+ rpn_rois, gt_classes, gt_boxes, self.batch_size_per_im,
+ self.fg_fraction, self.fg_thresh, self.bg_thresh, self.num_classes,
+ self.use_random, is_cascade, self.cascade_iou[stage], self.num_bins)
+ rois = outs[0]
+ rois_num = outs[-1]
+ # tgt_labels, tgt_bboxes, tgt_gt_inds
+ targets = outs[1:4]
+ return rois, rois_num, targets
+
+
+@register
+@serializable
+class MaskAssigner(object):
+ __shared__ = ['num_classes', 'mask_resolution']
+ """
+ Mask targets assignment module
+
+    The assignment consists of two steps:
+ 1. Select RoIs labels with foreground.
+ 2. Encode the RoIs and corresponding gt polygons to generate
+ mask target
+
+ Args:
+ num_classes (int): The number of class
+ mask_resolution (int): The resolution of mask target, default 14
+ """
+
+ def __init__(self, num_classes=80, mask_resolution=14):
+ super(MaskAssigner, self).__init__()
+ self.num_classes = num_classes
+ self.mask_resolution = mask_resolution
+
+ def __call__(self, rois, tgt_labels, tgt_gt_inds, inputs):
+ gt_segms = inputs['gt_poly']
+
+ outs = generate_mask_target(gt_segms, rois, tgt_labels, tgt_gt_inds,
+ self.num_classes, self.mask_resolution)
+
+ # mask_rois, mask_rois_num, tgt_classes, tgt_masks, mask_index, tgt_weights
+ return outs
+
+
+@register
+class RBoxAssigner(object):
+ """
+    Assigner for rotated boxes (rbox).
+    Args:
+        pos_iou_thr (float): IoU threshold for positive samples
+        neg_iou_thr (float): IoU threshold for negative samples
+        min_iou_thr (float): minimum IoU threshold for valid samples
+        ignore_iof_thr (int): label value assigned to ignored anchors
+ """
+
+ def __init__(self,
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.4,
+ min_iou_thr=0.0,
+ ignore_iof_thr=-2):
+ super(RBoxAssigner, self).__init__()
+
+ self.pos_iou_thr = pos_iou_thr
+ self.neg_iou_thr = neg_iou_thr
+ self.min_iou_thr = min_iou_thr
+ self.ignore_iof_thr = ignore_iof_thr
+
+ def anchor_valid(self, anchors):
+ """
+
+ Args:
+ anchor: M x 4
+
+ Returns:
+
+ """
+ if anchors.ndim == 3:
+ anchors = anchors.reshape(-1, anchors.shape[-1])
+ assert anchors.ndim == 2
+ anchor_num = anchors.shape[0]
+ anchor_valid = np.ones((anchor_num), np.int32)
+ anchor_inds = np.arange(anchor_num)
+ return anchor_inds
+
+ def rbox2delta(self,
+ proposals,
+ gt,
+ means=[0, 0, 0, 0, 0],
+ stds=[1, 1, 1, 1, 1]):
+ """
+ Args:
+ proposals: tensor [N, 5]
+ gt: gt [N, 5]
+ means: means [5]
+ stds: stds [5]
+ Returns:
+
+ """
+ proposals = proposals.astype(np.float64)
+
+ PI = np.pi
+
+ gt_widths = gt[..., 2]
+ gt_heights = gt[..., 3]
+ gt_angle = gt[..., 4]
+
+ proposals_widths = proposals[..., 2]
+ proposals_heights = proposals[..., 3]
+ proposals_angle = proposals[..., 4]
+
+ coord = gt[..., 0:2] - proposals[..., 0:2]
+ dx = (np.cos(proposals[..., 4]) * coord[..., 0] +
+ np.sin(proposals[..., 4]) * coord[..., 1]) / proposals_widths
+ dy = (-np.sin(proposals[..., 4]) * coord[..., 0] +
+ np.cos(proposals[..., 4]) * coord[..., 1]) / proposals_heights
+ dw = np.log(gt_widths / proposals_widths)
+ dh = np.log(gt_heights / proposals_heights)
+ da = (gt_angle - proposals_angle)
+
+ da = (da + PI / 4) % PI - PI / 4
+ da /= PI
+
+ deltas = np.stack([dx, dy, dw, dh, da], axis=-1)
+ means = np.array(means, dtype=deltas.dtype)
+ stds = np.array(stds, dtype=deltas.dtype)
+ deltas = (deltas - means) / stds
+ deltas = deltas.astype(np.float32)
+ return deltas
+
+    def assign_anchor(self,
+                      anchors,
+                      gt_bboxes,
+                      gt_labels,
+                      pos_iou_thr,
+                      neg_iou_thr,
+                      min_iou_thr=0.0,
+                      ignore_iof_thr=-2):
+        """
+        Args:
+            anchors (np.ndarray): [N, 5] anchors in xc, yc, w, h, angle format
+            gt_bboxes (np.ndarray): [M, 5] ground-truth boxes in xc, yc, w, h, angle format
+            gt_labels (np.ndarray): [M] ground-truth class labels
+
+        Returns:
+            tuple: per-anchor best gt index, per-anchor best IoU, and anchor labels
+        """
+ assert anchors.shape[1] == 4 or anchors.shape[1] == 5
+ assert gt_bboxes.shape[1] == 4 or gt_bboxes.shape[1] == 5
+ anchors_xc_yc = anchors
+ gt_bboxes_xc_yc = gt_bboxes
+
+ # calc rbox iou
+ anchors_xc_yc = anchors_xc_yc.astype(np.float32)
+ gt_bboxes_xc_yc = gt_bboxes_xc_yc.astype(np.float32)
+ anchors_xc_yc = paddle.to_tensor(anchors_xc_yc)
+ gt_bboxes_xc_yc = paddle.to_tensor(gt_bboxes_xc_yc)
+
+ try:
+ from rbox_iou_ops import rbox_iou
+ except Exception as e:
+ print("import custom_ops error, try install rbox_iou_ops " \
+ "following ppdet/ext_op/README.md", e)
+ sys.stdout.flush()
+ sys.exit(-1)
+
+ iou = rbox_iou(gt_bboxes_xc_yc, anchors_xc_yc)
+ iou = iou.numpy()
+ iou = iou.T
+
+        # index of the best-matching anchor for every gt
+ gt_bbox_anchor_inds = iou.argmax(axis=0)
+ gt_bbox_anchor_iou = iou[gt_bbox_anchor_inds, np.arange(iou.shape[1])]
+ gt_bbox_anchor_iou_inds = np.where(iou == gt_bbox_anchor_iou)[0]
+
+        # index of the best-matching gt bbox for every anchor
+ anchor_gt_bbox_inds = iou.argmax(axis=1)
+ anchor_gt_bbox_iou = iou[np.arange(iou.shape[0]), anchor_gt_bbox_inds]
+
+ # (1) set labels=-2 as default
+ labels = np.ones((iou.shape[0], ), dtype=np.int32) * ignore_iof_thr
+
+ # (2) assign ignore
+ labels[anchor_gt_bbox_iou < min_iou_thr] = ignore_iof_thr
+
+ # (3) assign neg_ids -1
+ assign_neg_ids1 = anchor_gt_bbox_iou >= min_iou_thr
+ assign_neg_ids2 = anchor_gt_bbox_iou < neg_iou_thr
+ assign_neg_ids = np.logical_and(assign_neg_ids1, assign_neg_ids2)
+ labels[assign_neg_ids] = -1
+
+ # anchor_gt_bbox_iou_inds
+ # (4) assign max_iou as pos_ids >=0
+ anchor_gt_bbox_iou_inds = anchor_gt_bbox_inds[gt_bbox_anchor_iou_inds]
+ # gt_bbox_anchor_iou_inds = np.logical_and(gt_bbox_anchor_iou_inds, anchor_gt_bbox_iou >= min_iou_thr)
+        labels[gt_bbox_anchor_iou_inds] = gt_labels[anchor_gt_bbox_iou_inds]
+
+ # (5) assign >= pos_iou_thr as pos_ids
+ iou_pos_iou_thr_ids = anchor_gt_bbox_iou >= pos_iou_thr
+ iou_pos_iou_thr_ids_box_inds = anchor_gt_bbox_inds[iou_pos_iou_thr_ids]
+        labels[iou_pos_iou_thr_ids] = gt_labels[iou_pos_iou_thr_ids_box_inds]
+ return anchor_gt_bbox_inds, anchor_gt_bbox_iou, labels
+
+ def __call__(self, anchors, gt_bboxes, gt_labels, is_crowd):
+
+ assert anchors.ndim == 2
+ assert anchors.shape[1] == 5
+ assert gt_bboxes.ndim == 2
+ assert gt_bboxes.shape[1] == 5
+
+ pos_iou_thr = self.pos_iou_thr
+ neg_iou_thr = self.neg_iou_thr
+ min_iou_thr = self.min_iou_thr
+ ignore_iof_thr = self.ignore_iof_thr
+
+ anchor_num = anchors.shape[0]
+
+        # is_crowd is accepted for API compatibility but does not affect
+        # the matching below
+
+ # Step1: match anchor and gt_bbox
+ anchor_gt_bbox_inds, anchor_gt_bbox_iou, labels = self.assign_anchor(
+ anchors, gt_bboxes,
+ gt_labels.reshape(-1), pos_iou_thr, neg_iou_thr, min_iou_thr,
+ ignore_iof_thr)
+
+ # Step2: sample anchor
+ pos_inds = np.where(labels >= 0)[0]
+ neg_inds = np.where(labels == -1)[0]
+
+ # Step3: make output
+ anchors_num = anchors.shape[0]
+ bbox_targets = np.zeros_like(anchors)
+ bbox_weights = np.zeros_like(anchors)
+ bbox_gt_bboxes = np.zeros_like(anchors)
+ pos_labels = np.zeros(anchors_num, dtype=np.int32)
+ pos_labels_weights = np.zeros(anchors_num, dtype=np.float32)
+
+ pos_sampled_anchors = anchors[pos_inds]
+ pos_sampled_gt_boxes = gt_bboxes[anchor_gt_bbox_inds[pos_inds]]
+ if len(pos_inds) > 0:
+ pos_bbox_targets = self.rbox2delta(pos_sampled_anchors,
+ pos_sampled_gt_boxes)
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_gt_bboxes[pos_inds, :] = pos_sampled_gt_boxes
+ bbox_weights[pos_inds, :] = 1.0
+
+ pos_labels[pos_inds] = labels[pos_inds]
+ pos_labels_weights[pos_inds] = 1.0
+
+ if len(neg_inds) > 0:
+ pos_labels_weights[neg_inds] = 1.0
+ return (pos_labels, pos_labels_weights, bbox_targets, bbox_weights,
+ bbox_gt_bboxes, pos_inds, neg_inds)
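+
+
+# Illustrative sketch, not from the original code: encoding one rotated box
+# against one anchor with rbox2delta (values assumed for the example).
+def _demo_rbox2delta():
+    assigner = RBoxAssigner()
+    anchors = np.array([[50., 50., 20., 10., 0.]])     # xc, yc, w, h, angle
+    gt = np.array([[52., 49., 22., 11., np.pi / 12.]])
+    deltas = assigner.rbox2delta(anchors, gt)
+    # deltas has shape [1, 5]: (dx, dy, dw, dh, da)
+    return deltas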
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__init__.py
new file mode 100644
index 000000000..3b461325f
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import jde_embedding_head
+from . import fairmot_embedding_head
+from . import resnet
+from . import pyramidal_embedding
+from . import pplcnet_embedding
+
+from .fairmot_embedding_head import *
+from .jde_embedding_head import *
+from .resnet import *
+from .pyramidal_embedding import *
+from .pplcnet_embedding import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..c892d8565
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/fairmot_embedding_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/fairmot_embedding_head.cpython-37.pyc
new file mode 100644
index 000000000..67188be62
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/fairmot_embedding_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/jde_embedding_head.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/jde_embedding_head.cpython-37.pyc
new file mode 100644
index 000000000..997d37bc1
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/jde_embedding_head.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/pplcnet_embedding.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/pplcnet_embedding.cpython-37.pyc
new file mode 100644
index 000000000..52c1121b2
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/pplcnet_embedding.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/pyramidal_embedding.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/pyramidal_embedding.cpython-37.pyc
new file mode 100644
index 000000000..24c90d066
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/pyramidal_embedding.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/resnet.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/resnet.cpython-37.pyc
new file mode 100644
index 000000000..43525e482
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/__pycache__/resnet.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/fairmot_embedding_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/fairmot_embedding_head.py
new file mode 100644
index 000000000..98ca257fd
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/fairmot_embedding_head.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import math
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import KaimingUniform, Uniform
+from ppdet.core.workspace import register
+from ppdet.modeling.heads.centernet_head import ConvLayer
+
+__all__ = ['FairMOTEmbeddingHead']
+
+
+@register
+class FairMOTEmbeddingHead(nn.Layer):
+ __shared__ = ['num_classes']
+ """
+ Args:
+ in_channels (int): the channel number of input to FairMOTEmbeddingHead.
+ ch_head (int): the channel of features before fed into embedding, 256 by default.
+ ch_emb (int): the channel of the embedding feature, 128 by default.
+ num_identities_dict (dict): the number of identities of each category,
+                              supports single-class and multi-class, {0: 14455} by default.
+ """
+
+ def __init__(self,
+ in_channels,
+ ch_head=256,
+ ch_emb=128,
+ num_classes=1,
+ num_identities_dict={0: 14455}):
+ super(FairMOTEmbeddingHead, self).__init__()
+ assert num_classes >= 1
+ self.num_classes = num_classes
+ self.ch_emb = ch_emb
+ self.num_identities_dict = num_identities_dict
+ self.reid = nn.Sequential(
+ ConvLayer(
+ in_channels, ch_head, kernel_size=3, padding=1, bias=True),
+ nn.ReLU(),
+ ConvLayer(
+ ch_head, ch_emb, kernel_size=1, stride=1, padding=0, bias=True))
+ param_attr = paddle.ParamAttr(initializer=KaimingUniform())
+ bound = 1 / math.sqrt(ch_emb)
+ bias_attr = paddle.ParamAttr(initializer=Uniform(-bound, bound))
+ self.reid_loss = nn.CrossEntropyLoss(ignore_index=-1, reduction='sum')
+
+ if num_classes == 1:
+ nID = self.num_identities_dict[0] # single class
+ self.classifier = nn.Linear(
+ ch_emb, nID, weight_attr=param_attr, bias_attr=bias_attr)
+            # When num_identities (nID) is 1, emb_scale is set to 1
+ self.emb_scale = math.sqrt(2) * math.log(nID - 1) if nID > 1 else 1
+ else:
+ self.classifiers = dict()
+ self.emb_scale_dict = dict()
+ for cls_id, nID in self.num_identities_dict.items():
+ self.classifiers[str(cls_id)] = nn.Linear(
+ ch_emb, nID, weight_attr=param_attr, bias_attr=bias_attr)
+                # When num_identities (nID) is 1, emb_scale is set to 1
+ self.emb_scale_dict[str(cls_id)] = math.sqrt(2) * math.log(
+ nID - 1) if nID > 1 else 1
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ if isinstance(input_shape, (list, tuple)):
+ input_shape = input_shape[0]
+ return {'in_channels': input_shape.channels}
+
+ def process_by_class(self, bboxes, embedding, bbox_inds, topk_clses):
+ pred_dets, pred_embs = [], []
+ for cls_id in range(self.num_classes):
+ inds_masks = topk_clses == cls_id
+ inds_masks = paddle.cast(inds_masks, 'float32')
+
+ pos_num = inds_masks.sum().numpy()
+ if pos_num == 0:
+ continue
+
+ cls_inds_mask = inds_masks > 0
+
+ bbox_mask = paddle.nonzero(cls_inds_mask)
+ cls_bboxes = paddle.gather_nd(bboxes, bbox_mask)
+ pred_dets.append(cls_bboxes)
+
+ cls_inds = paddle.masked_select(bbox_inds, cls_inds_mask)
+ cls_inds = cls_inds.unsqueeze(-1)
+ cls_embedding = paddle.gather_nd(embedding, cls_inds)
+ pred_embs.append(cls_embedding)
+
+ return paddle.concat(pred_dets), paddle.concat(pred_embs)
+
+ def forward(self,
+ neck_feat,
+ inputs,
+ bboxes=None,
+ bbox_inds=None,
+ topk_clses=None):
+ reid_feat = self.reid(neck_feat)
+ if self.training:
+ if self.num_classes == 1:
+ loss = self.get_loss(reid_feat, inputs)
+ else:
+ loss = self.get_mc_loss(reid_feat, inputs)
+ return loss
+ else:
+ assert bboxes is not None and bbox_inds is not None
+ reid_feat = F.normalize(reid_feat)
+ embedding = paddle.transpose(reid_feat, [0, 2, 3, 1])
+ embedding = paddle.reshape(embedding, [-1, self.ch_emb])
+ # embedding shape: [bs * h * w, ch_emb]
+
+ if self.num_classes == 1:
+ pred_dets = bboxes
+ pred_embs = paddle.gather(embedding, bbox_inds)
+ else:
+ pred_dets, pred_embs = self.process_by_class(
+ bboxes, embedding, bbox_inds, topk_clses)
+ return pred_dets, pred_embs
+
+ def get_loss(self, feat, inputs):
+ index = inputs['index']
+ mask = inputs['index_mask']
+ target = inputs['reid']
+ target = paddle.masked_select(target, mask > 0)
+ target = paddle.unsqueeze(target, 1)
+
+ feat = paddle.transpose(feat, perm=[0, 2, 3, 1])
+ feat_n, feat_h, feat_w, feat_c = feat.shape
+ feat = paddle.reshape(feat, shape=[feat_n, -1, feat_c])
+ index = paddle.unsqueeze(index, 2)
+ batch_inds = list()
+ for i in range(feat_n):
+ batch_ind = paddle.full(
+ shape=[1, index.shape[1], 1], fill_value=i, dtype='int64')
+ batch_inds.append(batch_ind)
+ batch_inds = paddle.concat(batch_inds, axis=0)
+ index = paddle.concat(x=[batch_inds, index], axis=2)
+ feat = paddle.gather_nd(feat, index=index)
+
+ mask = paddle.unsqueeze(mask, axis=2)
+ mask = paddle.expand_as(mask, feat)
+ mask.stop_gradient = True
+ feat = paddle.masked_select(feat, mask > 0)
+ feat = paddle.reshape(feat, shape=[-1, feat_c])
+ feat = F.normalize(feat)
+ feat = self.emb_scale * feat
+ logit = self.classifier(feat)
+ target.stop_gradient = True
+ loss = self.reid_loss(logit, target)
+ valid = (target != self.reid_loss.ignore_index)
+ valid.stop_gradient = True
+        count = paddle.sum(paddle.cast(valid, dtype='int32'))
+ count.stop_gradient = True
+ if count > 0:
+ loss = loss / count
+
+ return loss
+
+ def get_mc_loss(self, feat, inputs):
+ # feat.shape = [bs, ch_emb, h, w]
+ assert 'cls_id_map' in inputs and 'cls_tr_ids' in inputs
+ index = inputs['index']
+ mask = inputs['index_mask']
+ cls_id_map = inputs['cls_id_map'] # [bs, h, w]
+ cls_tr_ids = inputs['cls_tr_ids'] # [bs, num_classes, h, w]
+
+ feat = paddle.transpose(feat, perm=[0, 2, 3, 1])
+ feat_n, feat_h, feat_w, feat_c = feat.shape
+ feat = paddle.reshape(feat, shape=[feat_n, -1, feat_c])
+
+ index = paddle.unsqueeze(index, 2)
+ batch_inds = list()
+ for i in range(feat_n):
+ batch_ind = paddle.full(
+ shape=[1, index.shape[1], 1], fill_value=i, dtype='int64')
+ batch_inds.append(batch_ind)
+ batch_inds = paddle.concat(batch_inds, axis=0)
+ index = paddle.concat(x=[batch_inds, index], axis=2)
+ feat = paddle.gather_nd(feat, index=index)
+
+ mask = paddle.unsqueeze(mask, axis=2)
+ mask = paddle.expand_as(mask, feat)
+ mask.stop_gradient = True
+ feat = paddle.masked_select(feat, mask > 0)
+ feat = paddle.reshape(feat, shape=[-1, feat_c])
+
+ reid_losses = 0
+ for cls_id, id_num in self.num_identities_dict.items():
+ # target
+ cur_cls_tr_ids = paddle.reshape(
+ cls_tr_ids[:, cls_id, :, :], shape=[feat_n, -1]) # [bs, h*w]
+ cls_id_target = paddle.gather_nd(cur_cls_tr_ids, index=index)
+ mask = inputs['index_mask']
+ cls_id_target = paddle.masked_select(cls_id_target, mask > 0)
+ cls_id_target.stop_gradient = True
+
+ # feat
+ cls_id_feat = self.emb_scale_dict[str(cls_id)] * F.normalize(feat)
+ cls_id_pred = self.classifiers[str(cls_id)](cls_id_feat)
+
+ loss = self.reid_loss(cls_id_pred, cls_id_target)
+ valid = (cls_id_target != self.reid_loss.ignore_index)
+ valid.stop_gradient = True
+            count = paddle.sum(paddle.cast(valid, dtype='int32'))
+ count.stop_gradient = True
+ if count > 0:
+ loss = loss / count
+ reid_losses += loss
+
+ return reid_losses
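+
+
+# Illustrative sketch, not from the original code: the reid branch maps a
+# [N, in_channels, H, W] neck feature to [N, ch_emb, H, W] embeddings
+# (shapes assumed for the example).
+def _demo_fairmot_embedding_shapes():
+    head = FairMOTEmbeddingHead(in_channels=64, ch_head=256, ch_emb=128)
+    neck_feat = paddle.rand([2, 64, 38, 38])
+    emb = head.reid(neck_feat)  # [2, 128, 38, 38]
+    return emb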
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/jde_embedding_head.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/jde_embedding_head.py
new file mode 100644
index 000000000..c35f8cfb0
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/jde_embedding_head.py
@@ -0,0 +1,212 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import numpy as np
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.regularizer import L2Decay
+from ppdet.core.workspace import register
+from paddle.nn.initializer import Normal, Constant
+
+__all__ = ['JDEEmbeddingHead']
+
+
+class LossParam(nn.Layer):
+    def __init__(self, init_value=0., use_uncertainty=True):
+ super(LossParam, self).__init__()
+ self.loss_param = self.create_parameter(
+ shape=[1],
+ attr=ParamAttr(initializer=Constant(value=init_value)),
+ dtype="float32")
+
+ def forward(self, inputs):
+ out = paddle.exp(-self.loss_param) * inputs + self.loss_param
+ return out * 0.5
+
+
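+# Illustrative sketch, not from the original code: LossParam implements
+# learnable uncertainty weighting, 0.5 * (exp(-s) * loss + s), so each task
+# loss is scaled by a trainable parameter s.
+def _demo_loss_param():
+    lp = LossParam(init_value=-4.15)
+    raw_loss = paddle.to_tensor([1.0])
+    weighted = lp(raw_loss)  # 0.5 * (exp(4.15) * 1.0 - 4.15)
+    return weighted
+
+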
+@register
+class JDEEmbeddingHead(nn.Layer):
+ __shared__ = ['num_classes']
+ __inject__ = ['emb_loss', 'jde_loss']
+ """
+ JDEEmbeddingHead
+ Args:
+        num_classes (int): Number of classes. Only single-class tracking is supported.
+        num_identities (int): Number of identities.
+        anchor_levels (int): Number of anchor levels, same as FPN levels.
+        anchor_scales (int): Number of anchor scales on each FPN level.
+        embedding_dim (int): Embedding dimension. Default: 512.
+        emb_loss (object): Instance of 'JDEEmbeddingLoss'
+        jde_loss (object): Instance of 'JDELoss'
+ """
+
+ def __init__(
+ self,
+ num_classes=1,
+ num_identities=14455, # dataset.num_identities_dict[0]
+ anchor_levels=3,
+ anchor_scales=4,
+ embedding_dim=512,
+ emb_loss='JDEEmbeddingLoss',
+ jde_loss='JDELoss'):
+ super(JDEEmbeddingHead, self).__init__()
+ self.num_classes = num_classes
+ self.num_identities = num_identities
+ self.anchor_levels = anchor_levels
+ self.anchor_scales = anchor_scales
+ self.embedding_dim = embedding_dim
+ self.emb_loss = emb_loss
+ self.jde_loss = jde_loss
+
+ self.emb_scale = math.sqrt(2) * math.log(
+ self.num_identities - 1) if self.num_identities > 1 else 1
+
+ self.identify_outputs = []
+ self.loss_params_cls = []
+ self.loss_params_reg = []
+ self.loss_params_ide = []
+ for i in range(self.anchor_levels):
+ name = 'identify_output.{}'.format(i)
+ identify_output = self.add_sublayer(
+ name,
+ nn.Conv2D(
+ in_channels=64 * (2**self.anchor_levels) // (2**i),
+ out_channels=self.embedding_dim,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias_attr=ParamAttr(regularizer=L2Decay(0.))))
+ self.identify_outputs.append(identify_output)
+
+ loss_p_cls = self.add_sublayer('cls.{}'.format(i), LossParam(-4.15))
+ self.loss_params_cls.append(loss_p_cls)
+ loss_p_reg = self.add_sublayer('reg.{}'.format(i), LossParam(-4.85))
+ self.loss_params_reg.append(loss_p_reg)
+ loss_p_ide = self.add_sublayer('ide.{}'.format(i), LossParam(-2.3))
+ self.loss_params_ide.append(loss_p_ide)
+
+ self.classifier = self.add_sublayer(
+ 'classifier',
+ nn.Linear(
+ self.embedding_dim,
+ self.num_identities,
+ weight_attr=ParamAttr(
+ learning_rate=1., initializer=Normal(
+ mean=0.0, std=0.01)),
+ bias_attr=ParamAttr(
+ learning_rate=2., regularizer=L2Decay(0.))))
+
+ def forward(self,
+ identify_feats,
+ targets,
+ loss_confs=None,
+ loss_boxes=None,
+ bboxes=None,
+ boxes_idx=None,
+ nms_keep_idx=None):
+        assert self.num_classes == 1, 'JDE only supports single-class MOT.'
+ assert len(identify_feats) == self.anchor_levels
+ ide_outs = []
+ for feat, ide_head in zip(identify_feats, self.identify_outputs):
+ ide_outs.append(ide_head(feat))
+
+ if self.training:
+ assert len(loss_confs) == len(loss_boxes) == self.anchor_levels
+ loss_ides = self.emb_loss(ide_outs, targets, self.emb_scale,
+ self.classifier)
+ jde_losses = self.jde_loss(
+ loss_confs, loss_boxes, loss_ides, self.loss_params_cls,
+ self.loss_params_reg, self.loss_params_ide, targets)
+ return jde_losses
+ else:
+ assert bboxes is not None
+ assert boxes_idx is not None
+ assert nms_keep_idx is not None
+
+ emb_outs = self.get_emb_outs(ide_outs)
+ emb_valid = paddle.gather_nd(emb_outs, boxes_idx)
+ pred_embs = paddle.gather_nd(emb_valid, nms_keep_idx)
+
+ input_shape = targets['image'].shape[2:]
+ # input_shape: [h, w], before data transforms, set in model config
+ im_shape = targets['im_shape'][0].numpy()
+ # im_shape: [new_h, new_w], after data transforms
+ scale_factor = targets['scale_factor'][0].numpy()
+ bboxes[:, 2:] = self.scale_coords(bboxes[:, 2:], input_shape,
+ im_shape, scale_factor)
+ # tlwhs, scores, cls_ids
+ pred_dets = paddle.concat(
+ (bboxes[:, 2:], bboxes[:, 1:2], bboxes[:, 0:1]), axis=1)
+ return pred_dets, pred_embs
+
+ def scale_coords(self, coords, input_shape, im_shape, scale_factor):
+ ratio = scale_factor[0]
+ pad_w = (input_shape[1] - int(im_shape[1])) / 2
+ pad_h = (input_shape[0] - int(im_shape[0])) / 2
+ coords = paddle.cast(coords, 'float32')
+ coords[:, 0::2] -= pad_w
+ coords[:, 1::2] -= pad_h
+ coords[:, 0:4] /= ratio
+ coords[:, :4] = paddle.clip(
+ coords[:, :4], min=0, max=coords[:, :4].max())
+ return coords.round()
+
+ def get_emb_and_gt_outs(self, ide_outs, targets):
+ emb_and_gts = []
+ for i, p_ide in enumerate(ide_outs):
+ t_conf = targets['tconf{}'.format(i)]
+ t_ide = targets['tide{}'.format(i)]
+
+ p_ide = p_ide.transpose((0, 2, 3, 1))
+ p_ide_flatten = paddle.reshape(p_ide, [-1, self.embedding_dim])
+
+ mask = t_conf > 0
+ mask = paddle.cast(mask, dtype="int64")
+ emb_mask = mask.max(1).flatten()
+ emb_mask_inds = paddle.nonzero(emb_mask > 0).flatten()
+ if len(emb_mask_inds) > 0:
+ t_ide_flatten = paddle.reshape(t_ide.max(1), [-1, 1])
+ tids = paddle.gather(t_ide_flatten, emb_mask_inds)
+
+ embedding = paddle.gather(p_ide_flatten, emb_mask_inds)
+ embedding = self.emb_scale * F.normalize(embedding)
+ emb_and_gt = paddle.concat([embedding, tids], axis=1)
+ emb_and_gts.append(emb_and_gt)
+
+ if len(emb_and_gts) > 0:
+ return paddle.concat(emb_and_gts, axis=0)
+ else:
+ return paddle.zeros((1, self.embedding_dim + 1))
+
+ def get_emb_outs(self, ide_outs):
+ emb_outs = []
+ for i, p_ide in enumerate(ide_outs):
+ p_ide = p_ide.transpose((0, 2, 3, 1))
+
+ p_ide_repeat = paddle.tile(p_ide, [self.anchor_scales, 1, 1, 1])
+ embedding = F.normalize(p_ide_repeat, axis=-1)
+ emb = paddle.reshape(embedding, [-1, self.embedding_dim])
+ emb_outs.append(emb)
+
+ if len(emb_outs) > 0:
+ return paddle.concat(emb_outs, axis=0)
+ else:
+ return paddle.zeros((1, self.embedding_dim))
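+
+
+# Illustrative sketch, not from the original code: mapping detections from
+# the padded network input back to the original image with scale_coords
+# (letterbox padding and a uniform scale factor are assumed).
+def _demo_scale_coords():
+    head = JDEEmbeddingHead()
+    coords = paddle.to_tensor([[64., 34., 544., 304.]])  # xyxy, input space
+    out = head.scale_coords(coords,
+                            input_shape=[608, 1088],
+                            im_shape=np.array([540., 960.]),
+                            scale_factor=np.array([0.5, 0.5]))
+    # pad_w = (1088 - 960) / 2 and pad_h = (608 - 540) / 2 are removed,
+    # then boxes are divided by the scale ratio and rounded
+    return out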
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/pplcnet_embedding.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/pplcnet_embedding.py
new file mode 100644
index 000000000..cad9f85be
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/pplcnet_embedding.py
@@ -0,0 +1,281 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import Normal, Constant
+from paddle import ParamAttr
+from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Linear
+from paddle.regularizer import L2Decay
+from paddle.nn.initializer import KaimingNormal, XavierNormal
+from ppdet.core.workspace import register
+
+__all__ = ['PPLCNetEmbedding']
+
+
+# Each element (list) represents a depthwise block composed of k, in_c, out_c, s, use_se.
+# k: kernel_size
+# in_c: input channel number in depthwise block
+# out_c: output channel number in depthwise block
+# s: stride in depthwise block
+# use_se: whether to use SE block
+
+NET_CONFIG = {
+ "blocks2":
+ #k, in_c, out_c, s, use_se
+ [[3, 16, 32, 1, False]],
+ "blocks3": [[3, 32, 64, 2, False], [3, 64, 64, 1, False]],
+ "blocks4": [[3, 64, 128, 2, False], [3, 128, 128, 1, False]],
+ "blocks5": [[3, 128, 256, 2, False], [5, 256, 256, 1, False],
+ [5, 256, 256, 1, False], [5, 256, 256, 1, False],
+ [5, 256, 256, 1, False], [5, 256, 256, 1, False]],
+ "blocks6": [[5, 256, 512, 2, True], [5, 512, 512, 1, True]]
+}
+
+
+def make_divisible(v, divisor=8, min_value=None):
+ if min_value is None:
+ min_value = divisor
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+ if new_v < 0.9 * v:
+ new_v += divisor
+ return new_v
+
+
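+# Illustrative sketch, not from the original code: make_divisible rounds
+# channel counts to multiples of `divisor` (8 by default) and never drops
+# below 90% of the requested value.
+def _demo_make_divisible():
+    assert make_divisible(16 * 1.0) == 16
+    assert make_divisible(16 * 2.5) == 40  # 40 is already a multiple of 8
+    assert make_divisible(50) == 48        # 48 >= 0.9 * 50, so no bump
+
+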
+class ConvBNLayer(nn.Layer):
+ def __init__(self,
+ num_channels,
+ filter_size,
+ num_filters,
+ stride,
+ num_groups=1):
+ super().__init__()
+
+ self.conv = Conv2D(
+ in_channels=num_channels,
+ out_channels=num_filters,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ groups=num_groups,
+ weight_attr=ParamAttr(initializer=KaimingNormal()),
+ bias_attr=False)
+
+ self.bn = BatchNorm(
+ num_filters,
+ param_attr=ParamAttr(regularizer=L2Decay(0.0)),
+ bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
+ self.hardswish = nn.Hardswish()
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.bn(x)
+ x = self.hardswish(x)
+ return x
+
+
+class DepthwiseSeparable(nn.Layer):
+ def __init__(self,
+ num_channels,
+ num_filters,
+ stride,
+ dw_size=3,
+ use_se=False):
+ super().__init__()
+ self.use_se = use_se
+ self.dw_conv = ConvBNLayer(
+ num_channels=num_channels,
+ num_filters=num_channels,
+ filter_size=dw_size,
+ stride=stride,
+ num_groups=num_channels)
+ if use_se:
+ self.se = SEModule(num_channels)
+ self.pw_conv = ConvBNLayer(
+ num_channels=num_channels,
+ filter_size=1,
+ num_filters=num_filters,
+ stride=1)
+
+ def forward(self, x):
+ x = self.dw_conv(x)
+ if self.use_se:
+ x = self.se(x)
+ x = self.pw_conv(x)
+ return x
+
+
+class SEModule(nn.Layer):
+ def __init__(self, channel, reduction=4):
+ super().__init__()
+ self.avg_pool = AdaptiveAvgPool2D(1)
+ self.conv1 = Conv2D(
+ in_channels=channel,
+ out_channels=channel // reduction,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+ self.relu = nn.ReLU()
+ self.conv2 = Conv2D(
+ in_channels=channel // reduction,
+ out_channels=channel,
+ kernel_size=1,
+ stride=1,
+ padding=0)
+ self.hardsigmoid = nn.Hardsigmoid()
+
+ def forward(self, x):
+ identity = x
+ x = self.avg_pool(x)
+ x = self.conv1(x)
+ x = self.relu(x)
+ x = self.conv2(x)
+ x = self.hardsigmoid(x)
+ x = paddle.multiply(x=identity, y=x)
+ return x
+
+
+class PPLCNet(nn.Layer):
+ """
+ PP-LCNet, see https://arxiv.org/abs/2109.15099.
+ This code is different from PPLCNet in ppdet/modeling/backbones/lcnet.py
+ or in PaddleClas, because the output is the flatten feature of last_conv.
+
+ Args:
+ scale (float): Scale ratio of channels.
+ class_expand (int): Number of channels of conv feature.
+ """
+
+ def __init__(self, scale=1.0, class_expand=1280):
+ super(PPLCNet, self).__init__()
+ self.scale = scale
+ self.class_expand = class_expand
+
+ self.conv1 = ConvBNLayer(
+ num_channels=3,
+ filter_size=3,
+ num_filters=make_divisible(16 * scale),
+ stride=2)
+
+ self.blocks2 = nn.Sequential(*[
+ DepthwiseSeparable(
+ num_channels=make_divisible(in_c * scale),
+ num_filters=make_divisible(out_c * scale),
+ dw_size=k,
+ stride=s,
+ use_se=se)
+ for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks2"])
+ ])
+
+ self.blocks3 = nn.Sequential(*[
+ DepthwiseSeparable(
+ num_channels=make_divisible(in_c * scale),
+ num_filters=make_divisible(out_c * scale),
+ dw_size=k,
+ stride=s,
+ use_se=se)
+            for (k, in_c, out_c, s, se) in NET_CONFIG["blocks3"]
+ ])
+
+ self.blocks4 = nn.Sequential(*[
+ DepthwiseSeparable(
+ num_channels=make_divisible(in_c * scale),
+ num_filters=make_divisible(out_c * scale),
+ dw_size=k,
+ stride=s,
+ use_se=se)
+            for (k, in_c, out_c, s, se) in NET_CONFIG["blocks4"]
+ ])
+
+ self.blocks5 = nn.Sequential(*[
+ DepthwiseSeparable(
+ num_channels=make_divisible(in_c * scale),
+ num_filters=make_divisible(out_c * scale),
+ dw_size=k,
+ stride=s,
+ use_se=se)
+            for (k, in_c, out_c, s, se) in NET_CONFIG["blocks5"]
+ ])
+
+ self.blocks6 = nn.Sequential(*[
+ DepthwiseSeparable(
+ num_channels=make_divisible(in_c * scale),
+ num_filters=make_divisible(out_c * scale),
+ dw_size=k,
+ stride=s,
+ use_se=se)
+            for (k, in_c, out_c, s, se) in NET_CONFIG["blocks6"]
+ ])
+
+ self.avg_pool = AdaptiveAvgPool2D(1)
+ self.last_conv = Conv2D(
+ in_channels=make_divisible(NET_CONFIG["blocks6"][-1][2] * scale),
+ out_channels=self.class_expand,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias_attr=False)
+ self.hardswish = nn.Hardswish()
+ self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)
+
+ def forward(self, x):
+ x = self.conv1(x)
+
+ x = self.blocks2(x)
+ x = self.blocks3(x)
+ x = self.blocks4(x)
+ x = self.blocks5(x)
+ x = self.blocks6(x)
+
+ x = self.avg_pool(x)
+ x = self.last_conv(x)
+ x = self.hardswish(x)
+ x = self.flatten(x)
+ return x
+
+
+class FC(nn.Layer):
+ def __init__(self, input_ch, output_ch):
+ super(FC, self).__init__()
+ weight_attr = ParamAttr(initializer=XavierNormal())
+ self.fc = paddle.nn.Linear(input_ch, output_ch, weight_attr=weight_attr)
+
+ def forward(self, x):
+ out = self.fc(x)
+ return out
+
+
+@register
+class PPLCNetEmbedding(nn.Layer):
+ """
+ PPLCNet Embedding
+
+ Args:
+        scale (float): Scale ratio of the backbone channels.
+        input_ch (int): Number of channels of the backbone output feature.
+        output_ch (int): Number of channels of the output embedding.
+ """
+ def __init__(self, scale=2.5, input_ch=1280, output_ch=512):
+ super(PPLCNetEmbedding, self).__init__()
+ self.backbone = PPLCNet(scale=scale)
+ self.neck = FC(input_ch, output_ch)
+
+ def forward(self, x):
+ feat = self.backbone(x)
+ feat_out = self.neck(feat)
+ return feat_out
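+
+
+# Usage sketch (illustrative, not part of the original file): with the
+# defaults, the backbone emits a 1280-d flattened feature and the neck maps
+# it to a 512-d embedding:
+#   model = PPLCNetEmbedding(scale=2.5)
+#   emb = model(paddle.rand([1, 3, 224, 224]))   # emb.shape == [1, 512]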
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/pyramidal_embedding.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/pyramidal_embedding.py
new file mode 100644
index 000000000..a90d4e1ef
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/pyramidal_embedding.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import Normal, Constant
+from paddle import ParamAttr
+from .resnet import *
+from ppdet.core.workspace import register
+
+__all__ = ['PCBPyramid']
+
+
+@register
+class PCBPyramid(nn.Layer):
+ """
+    PCB (Part-based Convolutional Baseline), see https://arxiv.org/abs/1711.09349,
+    and Pyramidal Person Re-IDentification, see https://arxiv.org/abs/1810.12193.
+
+ Args:
+ input_ch (int): Number of channels of the input feature.
+ num_stripes (int): Number of sub-parts.
+        used_levels (tuple): Flags indicating whether each level is used;
+            1 means used.
+ num_classes (int): Number of classes for identities, default 751 in
+ Market-1501 dataset.
+ last_conv_stride (int): Stride of the last conv.
+ last_conv_dilation (int): Dilation of the last conv.
+ num_conv_out_channels (int): Number of channels of conv feature.
+ """
+
+ def __init__(self,
+ input_ch=2048,
+ num_stripes=6,
+ used_levels=(1, 1, 1, 1, 1, 1),
+ num_classes=751,
+ last_conv_stride=1,
+ last_conv_dilation=1,
+ num_conv_out_channels=128):
+ super(PCBPyramid, self).__init__()
+ self.num_stripes = num_stripes
+ self.used_levels = used_levels
+ self.num_classes = num_classes
+
+        self.num_in_each_level = list(range(self.num_stripes, 0, -1))
+ self.num_branches = sum(self.num_in_each_level)
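+        # With num_stripes=6 this gives num_in_each_level = [6, 5, 4, 3, 2, 1]
+        # and num_branches = 21, so the concatenated embedding is
+        # 21 * num_conv_out_channels (2688 by default) wide.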
+
+ self.base = ResNet101(
+ lr_mult=0.1,
+ last_conv_stride=last_conv_stride,
+ last_conv_dilation=last_conv_dilation)
+ self.dropout_layer = nn.Dropout(p=0.2)
+ self.pyramid_conv_list0, self.pyramid_fc_list0 = self.basic_branch(
+ num_conv_out_channels, input_ch)
+
+ def basic_branch(self, num_conv_out_channels, input_ch):
+        # Level indexes are defined from fine to coarse; each level's branches
+        # contain one more part than those of the previous level, and the
+        # sliding step is set to 1.
+ pyramid_conv_list = nn.LayerList()
+ pyramid_fc_list = nn.LayerList()
+
+ idx_levels = 0
+ for idx_branches in range(self.num_branches):
+ if idx_branches >= sum(self.num_in_each_level[0:idx_levels + 1]):
+ idx_levels += 1
+
+ pyramid_conv_list.append(
+ nn.Sequential(
+ nn.Conv2D(input_ch, num_conv_out_channels, 1),
+ nn.BatchNorm2D(num_conv_out_channels), nn.ReLU()))
+
+ idx_levels = 0
+ for idx_branches in range(self.num_branches):
+ if idx_branches >= sum(self.num_in_each_level[0:idx_levels + 1]):
+ idx_levels += 1
+
+ fc = nn.Linear(
+ in_features=num_conv_out_channels,
+ out_features=self.num_classes,
+ weight_attr=ParamAttr(initializer=Normal(
+ mean=0., std=0.001)),
+ bias_attr=ParamAttr(initializer=Constant(value=0.)))
+ pyramid_fc_list.append(fc)
+ return pyramid_conv_list, pyramid_fc_list
+
+ def pyramid_forward(self, feat):
+ each_stripe_size = int(feat.shape[2] / self.num_stripes)
+
+ feat_list, logits_list = [], []
+ idx_levels = 0
+ used_branches = 0
+ for idx_branches in range(self.num_branches):
+ if idx_branches >= sum(self.num_in_each_level[0:idx_levels + 1]):
+ idx_levels += 1
+ idx_in_each_level = idx_branches - sum(self.num_in_each_level[
+ 0:idx_levels])
+ stripe_size_in_each_level = each_stripe_size * (idx_levels + 1)
+ start = idx_in_each_level * each_stripe_size
+ end = start + stripe_size_in_each_level
+
+ k = feat.shape[-1]
+ local_feat_avgpool = F.avg_pool2d(
+ feat[:, :, start:end, :],
+ kernel_size=(stripe_size_in_each_level, k))
+ local_feat_maxpool = F.max_pool2d(
+ feat[:, :, start:end, :],
+ kernel_size=(stripe_size_in_each_level, k))
+ local_feat = local_feat_avgpool + local_feat_maxpool
+
+ local_feat = self.pyramid_conv_list0[used_branches](local_feat)
+ local_feat = paddle.reshape(
+ local_feat, shape=[local_feat.shape[0], -1])
+ feat_list.append(local_feat)
+
+ local_logits = self.pyramid_fc_list0[used_branches](
+ self.dropout_layer(local_feat))
+ logits_list.append(local_logits)
+
+ used_branches += 1
+
+ return feat_list, logits_list
+
+ def forward(self, x):
+ feat = self.base(x)
+ assert feat.shape[2] % self.num_stripes == 0
+ feat_list, logits_list = self.pyramid_forward(feat)
+ feat_out = paddle.concat(feat_list, axis=-1)
+ return feat_out
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/resnet.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/resnet.py
new file mode 100644
index 000000000..968fe9774
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/reid/resnet.py
@@ -0,0 +1,310 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import math
+import paddle
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn.initializer import Normal
+
+__all__ = ["ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"]
+
+
+class ConvBNLayer(nn.Layer):
+ def __init__(self,
+ num_channels,
+ num_filters,
+ filter_size,
+ stride=1,
+ dilation=1,
+ groups=1,
+ act=None,
+ lr_mult=1.0,
+ name=None,
+ data_format="NCHW"):
+ super(ConvBNLayer, self).__init__()
+ conv_stdv = filter_size * filter_size * num_filters
+ self._conv = nn.Conv2D(
+ in_channels=num_channels,
+ out_channels=num_filters,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ dilation=dilation,
+ groups=groups,
+ weight_attr=ParamAttr(
+ learning_rate=lr_mult,
+ initializer=Normal(0, math.sqrt(2. / conv_stdv))),
+ bias_attr=False,
+ data_format=data_format)
+
+ self._batch_norm = nn.BatchNorm(
+ num_filters, act=act, data_layout=data_format)
+
+ def forward(self, inputs):
+ y = self._conv(inputs)
+ y = self._batch_norm(y)
+ return y
+
+
+class BottleneckBlock(nn.Layer):
+ def __init__(self,
+ num_channels,
+ num_filters,
+ stride,
+ shortcut=True,
+ name=None,
+ lr_mult=1.0,
+ dilation=1,
+ data_format="NCHW"):
+ super(BottleneckBlock, self).__init__()
+ self.conv0 = ConvBNLayer(
+ num_channels=num_channels,
+ num_filters=num_filters,
+ filter_size=1,
+ dilation=dilation,
+ act="relu",
+ lr_mult=lr_mult,
+ name=name + "_branch2a",
+ data_format=data_format)
+ self.conv1 = ConvBNLayer(
+ num_channels=num_filters,
+ num_filters=num_filters,
+ filter_size=3,
+ dilation=dilation,
+ stride=stride,
+ act="relu",
+ lr_mult=lr_mult,
+ name=name + "_branch2b",
+ data_format=data_format)
+ self.conv2 = ConvBNLayer(
+ num_channels=num_filters,
+ num_filters=num_filters * 4,
+ filter_size=1,
+ dilation=dilation,
+ act=None,
+ lr_mult=lr_mult,
+ name=name + "_branch2c",
+ data_format=data_format)
+ if not shortcut:
+ self.short = ConvBNLayer(
+ num_channels=num_channels,
+ num_filters=num_filters * 4,
+ filter_size=1,
+ dilation=dilation,
+ stride=stride,
+ lr_mult=lr_mult,
+ name=name + "_branch1",
+ data_format=data_format)
+ self.shortcut = shortcut
+ self._num_channels_out = num_filters * 4
+
+ def forward(self, inputs):
+ y = self.conv0(inputs)
+ conv1 = self.conv1(y)
+ conv2 = self.conv2(conv1)
+ if self.shortcut:
+ short = inputs
+ else:
+ short = self.short(inputs)
+ y = paddle.add(x=short, y=conv2)
+ y = F.relu(y)
+ return y
+
+
+class BasicBlock(nn.Layer):
+ def __init__(self,
+ num_channels,
+ num_filters,
+ stride,
+ shortcut=True,
+ name=None,
+ data_format="NCHW"):
+ super(BasicBlock, self).__init__()
+ self.stride = stride
+ self.conv0 = ConvBNLayer(
+ num_channels=num_channels,
+ num_filters=num_filters,
+ filter_size=3,
+ stride=stride,
+ act="relu",
+ name=name + "_branch2a",
+ data_format=data_format)
+ self.conv1 = ConvBNLayer(
+ num_channels=num_filters,
+ num_filters=num_filters,
+ filter_size=3,
+ act=None,
+ name=name + "_branch2b",
+ data_format=data_format)
+ if not shortcut:
+ self.short = ConvBNLayer(
+ num_channels=num_channels,
+ num_filters=num_filters,
+ filter_size=1,
+ stride=stride,
+ name=name + "_branch1",
+ data_format=data_format)
+ self.shortcut = shortcut
+
+ def forward(self, inputs):
+ y = self.conv0(inputs)
+ conv1 = self.conv1(y)
+ if self.shortcut:
+ short = inputs
+ else:
+ short = self.short(inputs)
+ y = paddle.add(x=short, y=conv1)
+ y = F.relu(y)
+ return y
+
+
+class ResNet(nn.Layer):
+ def __init__(self,
+ layers=50,
+ lr_mult=1.0,
+ last_conv_stride=2,
+ last_conv_dilation=1):
+ super(ResNet, self).__init__()
+ self.layers = layers
+ self.data_format = "NCHW"
+ self.input_image_channel = 3
+ supported_layers = [18, 34, 50, 101, 152]
+ assert layers in supported_layers, \
+ "supported layers are {} but input layer is {}".format(
+ supported_layers, layers)
+ if layers == 18:
+ depth = [2, 2, 2, 2]
+ elif layers == 34 or layers == 50:
+ depth = [3, 4, 6, 3]
+ elif layers == 101:
+ depth = [3, 4, 23, 3]
+ elif layers == 152:
+ depth = [3, 8, 36, 3]
+ num_channels = [64, 256, 512,
+ 1024] if layers >= 50 else [64, 64, 128, 256]
+ num_filters = [64, 128, 256, 512]
+ self.conv = ConvBNLayer(
+ num_channels=self.input_image_channel,
+ num_filters=64,
+ filter_size=7,
+ stride=2,
+ act="relu",
+ lr_mult=lr_mult,
+ name="conv1",
+ data_format=self.data_format)
+ self.pool2d_max = nn.MaxPool2D(
+ kernel_size=3, stride=2, padding=1, data_format=self.data_format)
+ self.block_list = []
+ if layers >= 50:
+ for block in range(len(depth)):
+ shortcut = False
+ for i in range(depth[block]):
+ if layers in [101, 152] and block == 2:
+ if i == 0:
+ conv_name = "res" + str(block + 2) + "a"
+ else:
+ conv_name = "res" + str(block + 2) + "b" + str(i)
+ else:
+ conv_name = "res" + str(block + 2) + chr(97 + i)
+ if i != 0 or block == 0:
+ stride = 1
+ elif block == len(depth) - 1:
+ stride = last_conv_stride
+ else:
+ stride = 2
+ bottleneck_block = self.add_sublayer(
+ conv_name,
+ BottleneckBlock(
+ num_channels=num_channels[block]
+ if i == 0 else num_filters[block] * 4,
+ num_filters=num_filters[block],
+ stride=stride,
+ shortcut=shortcut,
+ name=conv_name,
+ lr_mult=lr_mult,
+ dilation=last_conv_dilation
+ if block == len(depth) - 1 else 1,
+ data_format=self.data_format))
+ self.block_list.append(bottleneck_block)
+ shortcut = True
+ else:
+ for block in range(len(depth)):
+ shortcut = False
+ for i in range(depth[block]):
+ conv_name = "res" + str(block + 2) + chr(97 + i)
+ basic_block = self.add_sublayer(
+ conv_name,
+ BasicBlock(
+ num_channels=num_channels[block]
+ if i == 0 else num_filters[block],
+ num_filters=num_filters[block],
+ stride=2 if i == 0 and block != 0 else 1,
+ shortcut=shortcut,
+ name=conv_name,
+ data_format=self.data_format))
+ self.block_list.append(basic_block)
+ shortcut = True
+
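+    # Note (added for clarity): with the default last_conv_stride=2 the
+    # output stride is 32; PCBPyramid passes last_conv_stride=1, which keeps
+    # a taller feature map (output stride 16) for horizontal-stripe slicing.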
+ def forward(self, inputs):
+ y = self.conv(inputs)
+ y = self.pool2d_max(y)
+ for block in self.block_list:
+ y = block(y)
+ return y
+
+
+def ResNet18(**args):
+ model = ResNet(layers=18, **args)
+ return model
+
+
+def ResNet34(**args):
+ model = ResNet(layers=34, **args)
+ return model
+
+
+def ResNet50(pretrained=None, **args):
+ model = ResNet(layers=50, **args)
+ if pretrained is not None:
+ if not (os.path.isdir(pretrained) or
+ os.path.exists(pretrained + '.pdparams')):
+            raise ValueError("Model pretrain path {} does not "
+                             "exist.".format(pretrained))
+ param_state_dict = paddle.load(pretrained + '.pdparams')
+ model.set_dict(param_state_dict)
+ return model
+
+
+def ResNet101(pretrained=None, **args):
+ model = ResNet(layers=101, **args)
+ if pretrained is not None:
+ if not (os.path.isdir(pretrained) or
+ os.path.exists(pretrained + '.pdparams')):
+            raise ValueError("Model pretrain path {} does not "
+                             "exist.".format(pretrained))
+ param_state_dict = paddle.load(pretrained + '.pdparams')
+ model.set_dict(param_state_dict)
+ return model
+
+
+def ResNet152(**args):
+ model = ResNet(layers=152, **args)
+ return model
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/shape_spec.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/shape_spec.py
new file mode 100644
index 000000000..81601fd64
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/shape_spec.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The code is based on:
+# https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/shape_spec.py
+
+from collections import namedtuple
+
+
+class ShapeSpec(
+ namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])):
+ def __new__(cls, channels=None, height=None, width=None, stride=None):
+ return super(ShapeSpec, cls).__new__(cls, channels, height, width,
+ stride)
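+
+
+# Usage sketch (illustrative): a backbone can describe each of its outputs
+# as, e.g., ShapeSpec(channels=256, stride=4); unspecified fields default
+# to None.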
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/__init__.py
new file mode 100644
index 000000000..847ddc47a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_architectures.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_architectures.py
new file mode 100644
index 000000000..25767e74a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_architectures.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import unittest
+import ppdet
+
+
+class TestFasterRCNN(unittest.TestCase):
+ def setUp(self):
+ self.set_config()
+
+ def set_config(self):
+ self.cfg_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml'
+
+ def test_trainer(self):
+        # Trainer.__init__ builds the model and DataLoader; the 'train' and
+        # 'eval' modes also load datasets, so use 'test' mode to keep these
+        # tests lightweight.
+ cfg = ppdet.core.workspace.load_config(self.cfg_file)
+ trainer = ppdet.engine.Trainer(cfg, mode='test')
+
+
+class TestMaskRCNN(TestFasterRCNN):
+ def set_config(self):
+ self.cfg_file = 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml'
+
+
+class TestCascadeRCNN(TestFasterRCNN):
+ def set_config(self):
+ self.cfg_file = 'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.yml'
+
+
+class TestYolov3(TestFasterRCNN):
+ def set_config(self):
+ self.cfg_file = 'configs/yolov3/yolov3_darknet53_270e_coco.yml'
+
+
+class TestSSD(TestFasterRCNN):
+ def set_config(self):
+ self.cfg_file = 'configs/ssd/ssd_vgg16_300_240e_voc.yml'
+
+
+class TestGFL(TestFasterRCNN):
+ def set_config(self):
+ self.cfg_file = 'configs/gfl/gfl_r50_fpn_1x_coco.yml'
+
+
+class TestPicoDet(TestFasterRCNN):
+ def set_config(self):
+ self.cfg_file = 'configs/picodet/picodet_s_320_coco.yml'
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_base.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_base.py
new file mode 100644
index 000000000..cbb9033b3
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_base.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import unittest
+
+import contextlib
+
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid.framework import Program
+from paddle.fluid import core
+
+
+class LayerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.seed = 111
+
+ @classmethod
+ def tearDownClass(cls):
+ pass
+
+ def _get_place(self, force_to_use_cpu=False):
+        # this option is for ops that only have a CPU kernel
+ if force_to_use_cpu:
+ return core.CPUPlace()
+ else:
+ if core.is_compiled_with_cuda():
+ return core.CUDAPlace(0)
+ return core.CPUPlace()
+
+ @contextlib.contextmanager
+ def static_graph(self):
+ paddle.enable_static()
+ scope = fluid.core.Scope()
+ program = Program()
+ with fluid.scope_guard(scope):
+ with fluid.program_guard(program):
+ paddle.seed(self.seed)
+ paddle.framework.random._manual_program_seed(self.seed)
+ yield
+
+ def get_static_graph_result(self,
+ feed,
+ fetch_list,
+ with_lod=False,
+ force_to_use_cpu=False):
+ exe = fluid.Executor(self._get_place(force_to_use_cpu))
+ exe.run(fluid.default_startup_program())
+ return exe.run(fluid.default_main_program(),
+ feed=feed,
+ fetch_list=fetch_list,
+ return_numpy=(not with_lod))
+
+ @contextlib.contextmanager
+ def dynamic_graph(self, force_to_use_cpu=False):
+ paddle.disable_static()
+ with fluid.dygraph.guard(
+ self._get_place(force_to_use_cpu=force_to_use_cpu)):
+ paddle.seed(self.seed)
+ paddle.framework.random._manual_program_seed(self.seed)
+ yield
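+
+
+# Usage sketch (illustrative): subclasses build the same op in both modes
+# and compare the results, e.g.
+#   with self.static_graph():
+#       out = some_op(paddle.static.data(name='x', shape=[...], dtype=...))
+#       res, = self.get_static_graph_result(feed={'x': x_np},
+#                                           fetch_list=[out])
+#   with self.dynamic_graph():
+#       res_dy = some_op(paddle.to_tensor(x_np)).numpy()
+#   np.testing.assert_array_equal(res, res_dy)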
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_ops.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_ops.py
new file mode 100644
index 000000000..d4b574748
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_ops.py
@@ -0,0 +1,835 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import os, sys
+# add python path of PaddleDetection to sys.path
+parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 4)))
+if parent_path not in sys.path:
+ sys.path.append(parent_path)
+
+import unittest
+import numpy as np
+
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid.dygraph import base
+
+import ppdet.modeling.ops as ops
+from ppdet.modeling.tests.test_base import LayerTest
+
+
+def make_rois(h, w, rois_num, output_size):
+ rois = np.zeros((0, 4)).astype('float32')
+ for roi_num in rois_num:
+ roi = np.zeros((roi_num, 4)).astype('float32')
+ roi[:, 0] = np.random.randint(0, h - output_size[0], size=roi_num)
+ roi[:, 1] = np.random.randint(0, w - output_size[1], size=roi_num)
+ roi[:, 2] = np.random.randint(roi[:, 0] + output_size[0], h)
+ roi[:, 3] = np.random.randint(roi[:, 1] + output_size[1], w)
+ rois = np.vstack((rois, roi))
+ return rois
+
+
+def softmax(x):
+    # Clip shiftx from below; otherwise, when computing a loss via
+    # log(exp(shiftx)), log(0) = INF may occur.
+ shiftx = (x - np.max(x)).clip(-64.)
+ exps = np.exp(shiftx)
+ return exps / np.sum(exps)
+
+
+class TestCollectFpnProposals(LayerTest):
+ def test_collect_fpn_proposals(self):
+ multi_bboxes_np = []
+ multi_scores_np = []
+ rois_num_per_level_np = []
+ for i in range(4):
+ bboxes_np = np.random.rand(5, 4).astype('float32')
+ scores_np = np.random.rand(5, 1).astype('float32')
+ rois_num = np.array([2, 3]).astype('int32')
+ multi_bboxes_np.append(bboxes_np)
+ multi_scores_np.append(scores_np)
+ rois_num_per_level_np.append(rois_num)
+
+ with self.static_graph():
+ multi_bboxes = []
+ multi_scores = []
+ rois_num_per_level = []
+ for i in range(4):
+ bboxes = paddle.static.data(
+ name='rois' + str(i),
+ shape=[5, 4],
+ dtype='float32',
+ lod_level=1)
+ scores = paddle.static.data(
+ name='scores' + str(i),
+ shape=[5, 1],
+ dtype='float32',
+ lod_level=1)
+ rois_num = paddle.static.data(
+ name='rois_num' + str(i), shape=[None], dtype='int32')
+
+ multi_bboxes.append(bboxes)
+ multi_scores.append(scores)
+ rois_num_per_level.append(rois_num)
+
+ fpn_rois, rois_num = ops.collect_fpn_proposals(
+ multi_bboxes,
+ multi_scores,
+ 2,
+ 5,
+ 10,
+ rois_num_per_level=rois_num_per_level)
+ feed = {}
+ for i in range(4):
+ feed['rois' + str(i)] = multi_bboxes_np[i]
+ feed['scores' + str(i)] = multi_scores_np[i]
+ feed['rois_num' + str(i)] = rois_num_per_level_np[i]
+ fpn_rois_stat, rois_num_stat = self.get_static_graph_result(
+ feed=feed, fetch_list=[fpn_rois, rois_num], with_lod=True)
+ fpn_rois_stat = np.array(fpn_rois_stat)
+ rois_num_stat = np.array(rois_num_stat)
+
+ with self.dynamic_graph():
+ multi_bboxes_dy = []
+ multi_scores_dy = []
+ rois_num_per_level_dy = []
+ for i in range(4):
+ bboxes_dy = base.to_variable(multi_bboxes_np[i])
+ scores_dy = base.to_variable(multi_scores_np[i])
+ rois_num_dy = base.to_variable(rois_num_per_level_np[i])
+ multi_bboxes_dy.append(bboxes_dy)
+ multi_scores_dy.append(scores_dy)
+ rois_num_per_level_dy.append(rois_num_dy)
+ fpn_rois_dy, rois_num_dy = ops.collect_fpn_proposals(
+ multi_bboxes_dy,
+ multi_scores_dy,
+ 2,
+ 5,
+ 10,
+ rois_num_per_level=rois_num_per_level_dy)
+ fpn_rois_dy = fpn_rois_dy.numpy()
+ rois_num_dy = rois_num_dy.numpy()
+
+ self.assertTrue(np.array_equal(fpn_rois_stat, fpn_rois_dy))
+ self.assertTrue(np.array_equal(rois_num_stat, rois_num_dy))
+
+ def test_collect_fpn_proposals_error(self):
+ def generate_input(bbox_type, score_type, name):
+ multi_bboxes = []
+ multi_scores = []
+ for i in range(4):
+ bboxes = paddle.static.data(
+ name='rois' + name + str(i),
+ shape=[10, 4],
+ dtype=bbox_type,
+ lod_level=1)
+ scores = paddle.static.data(
+ name='scores' + name + str(i),
+ shape=[10, 1],
+ dtype=score_type,
+ lod_level=1)
+ multi_bboxes.append(bboxes)
+ multi_scores.append(scores)
+ return multi_bboxes, multi_scores
+
+ with self.static_graph():
+ bbox1 = paddle.static.data(
+ name='rois', shape=[5, 10, 4], dtype='float32', lod_level=1)
+ score1 = paddle.static.data(
+ name='scores', shape=[5, 10, 1], dtype='float32', lod_level=1)
+ bbox2, score2 = generate_input('int32', 'float32', '2')
+ self.assertRaises(
+ TypeError,
+ ops.collect_fpn_proposals,
+ multi_rois=bbox1,
+ multi_scores=score1,
+ min_level=2,
+ max_level=5,
+ post_nms_top_n=2000)
+ self.assertRaises(
+ TypeError,
+ ops.collect_fpn_proposals,
+ multi_rois=bbox2,
+ multi_scores=score2,
+ min_level=2,
+ max_level=5,
+ post_nms_top_n=2000)
+
+ paddle.disable_static()
+
+
+class TestDistributeFpnProposals(LayerTest):
+ def test_distribute_fpn_proposals(self):
+ rois_np = np.random.rand(10, 4).astype('float32')
+ rois_num_np = np.array([4, 6]).astype('int32')
+ with self.static_graph():
+ rois = paddle.static.data(
+ name='rois', shape=[10, 4], dtype='float32')
+ rois_num = paddle.static.data(
+ name='rois_num', shape=[None], dtype='int32')
+ multi_rois, restore_ind, rois_num_per_level = ops.distribute_fpn_proposals(
+ fpn_rois=rois,
+ min_level=2,
+ max_level=5,
+ refer_level=4,
+ refer_scale=224,
+ rois_num=rois_num)
+ fetch_list = multi_rois + [restore_ind] + rois_num_per_level
+ output_stat = self.get_static_graph_result(
+ feed={'rois': rois_np,
+ 'rois_num': rois_num_np},
+ fetch_list=fetch_list,
+ with_lod=True)
+ output_stat_np = []
+ for output in output_stat:
+ output_np = np.array(output)
+ if len(output_np) > 0:
+ output_stat_np.append(output_np)
+
+ with self.dynamic_graph():
+ rois_dy = base.to_variable(rois_np)
+ rois_num_dy = base.to_variable(rois_num_np)
+ multi_rois_dy, restore_ind_dy, rois_num_per_level_dy = ops.distribute_fpn_proposals(
+ fpn_rois=rois_dy,
+ min_level=2,
+ max_level=5,
+ refer_level=4,
+ refer_scale=224,
+ rois_num=rois_num_dy)
+ output_dy = multi_rois_dy + [restore_ind_dy] + rois_num_per_level_dy
+ output_dy_np = []
+ for output in output_dy:
+ output_np = output.numpy()
+ if len(output_np) > 0:
+ output_dy_np.append(output_np)
+
+ for res_stat, res_dy in zip(output_stat_np, output_dy_np):
+ self.assertTrue(np.array_equal(res_stat, res_dy))
+
+ def test_distribute_fpn_proposals_error(self):
+ with self.static_graph():
+ fpn_rois = paddle.static.data(
+ name='data_error', shape=[10, 4], dtype='int32', lod_level=1)
+ self.assertRaises(
+ TypeError,
+ ops.distribute_fpn_proposals,
+ fpn_rois=fpn_rois,
+ min_level=2,
+ max_level=5,
+ refer_level=4,
+ refer_scale=224)
+
+ paddle.disable_static()
+
+
+class TestROIAlign(LayerTest):
+ def test_roi_align(self):
+ b, c, h, w = 2, 12, 20, 20
+ inputs_np = np.random.rand(b, c, h, w).astype('float32')
+ rois_num = [4, 6]
+ output_size = (7, 7)
+ rois_np = make_rois(h, w, rois_num, output_size)
+ rois_num_np = np.array(rois_num).astype('int32')
+ with self.static_graph():
+ inputs = paddle.static.data(
+ name='inputs', shape=[b, c, h, w], dtype='float32')
+ rois = paddle.static.data(
+ name='rois', shape=[10, 4], dtype='float32')
+ rois_num = paddle.static.data(
+ name='rois_num', shape=[None], dtype='int32')
+
+ output = ops.roi_align(
+ input=inputs,
+ rois=rois,
+ output_size=output_size,
+ rois_num=rois_num)
+ output_np, = self.get_static_graph_result(
+ feed={
+ 'inputs': inputs_np,
+ 'rois': rois_np,
+ 'rois_num': rois_num_np
+ },
+ fetch_list=output,
+ with_lod=False)
+
+ with self.dynamic_graph():
+ inputs_dy = base.to_variable(inputs_np)
+ rois_dy = base.to_variable(rois_np)
+ rois_num_dy = base.to_variable(rois_num_np)
+
+ output_dy = ops.roi_align(
+ input=inputs_dy,
+ rois=rois_dy,
+ output_size=output_size,
+ rois_num=rois_num_dy)
+ output_dy_np = output_dy.numpy()
+
+ self.assertTrue(np.array_equal(output_np, output_dy_np))
+
+ def test_roi_align_error(self):
+ with self.static_graph():
+ inputs = paddle.static.data(
+ name='inputs', shape=[2, 12, 20, 20], dtype='float32')
+ rois = paddle.static.data(
+ name='data_error', shape=[10, 4], dtype='int32', lod_level=1)
+ self.assertRaises(
+ TypeError,
+ ops.roi_align,
+ input=inputs,
+ rois=rois,
+ output_size=(7, 7))
+
+ paddle.disable_static()
+
+
+class TestROIPool(LayerTest):
+ def test_roi_pool(self):
+ b, c, h, w = 2, 12, 20, 20
+ inputs_np = np.random.rand(b, c, h, w).astype('float32')
+ rois_num = [4, 6]
+ output_size = (7, 7)
+ rois_np = make_rois(h, w, rois_num, output_size)
+ rois_num_np = np.array(rois_num).astype('int32')
+ with self.static_graph():
+ inputs = paddle.static.data(
+ name='inputs', shape=[b, c, h, w], dtype='float32')
+ rois = paddle.static.data(
+ name='rois', shape=[10, 4], dtype='float32')
+ rois_num = paddle.static.data(
+ name='rois_num', shape=[None], dtype='int32')
+
+ output, _ = ops.roi_pool(
+ input=inputs,
+ rois=rois,
+ output_size=output_size,
+ rois_num=rois_num)
+ output_np, = self.get_static_graph_result(
+ feed={
+ 'inputs': inputs_np,
+ 'rois': rois_np,
+ 'rois_num': rois_num_np
+ },
+ fetch_list=[output],
+ with_lod=False)
+
+ with self.dynamic_graph():
+ inputs_dy = base.to_variable(inputs_np)
+ rois_dy = base.to_variable(rois_np)
+ rois_num_dy = base.to_variable(rois_num_np)
+
+ output_dy, _ = ops.roi_pool(
+ input=inputs_dy,
+ rois=rois_dy,
+ output_size=output_size,
+ rois_num=rois_num_dy)
+ output_dy_np = output_dy.numpy()
+
+ self.assertTrue(np.array_equal(output_np, output_dy_np))
+
+ def test_roi_pool_error(self):
+ with self.static_graph():
+ inputs = paddle.static.data(
+ name='inputs', shape=[2, 12, 20, 20], dtype='float32')
+ rois = paddle.static.data(
+ name='data_error', shape=[10, 4], dtype='int32', lod_level=1)
+ self.assertRaises(
+ TypeError,
+ ops.roi_pool,
+ input=inputs,
+ rois=rois,
+ output_size=(7, 7))
+
+ paddle.disable_static()
+
+
+class TestIoUSimilarity(LayerTest):
+ def test_iou_similarity(self):
+ b, c, h, w = 2, 12, 20, 20
+ inputs_np = np.random.rand(b, c, h, w).astype('float32')
+ output_size = (7, 7)
+ x_np = make_rois(h, w, [20], output_size)
+ y_np = make_rois(h, w, [10], output_size)
+ with self.static_graph():
+ x = paddle.static.data(name='x', shape=[20, 4], dtype='float32')
+ y = paddle.static.data(name='y', shape=[10, 4], dtype='float32')
+
+ iou = ops.iou_similarity(x=x, y=y)
+ iou_np, = self.get_static_graph_result(
+ feed={
+ 'x': x_np,
+ 'y': y_np,
+ }, fetch_list=[iou], with_lod=False)
+
+ with self.dynamic_graph():
+ x_dy = base.to_variable(x_np)
+ y_dy = base.to_variable(y_np)
+
+ iou_dy = ops.iou_similarity(x=x_dy, y=y_dy)
+ iou_dy_np = iou_dy.numpy()
+
+ self.assertTrue(np.array_equal(iou_np, iou_dy_np))
+
+
+class TestBipartiteMatch(LayerTest):
+ def test_bipartite_match(self):
+ distance = np.random.random((20, 10)).astype('float32')
+ with self.static_graph():
+ x = paddle.static.data(name='x', shape=[20, 10], dtype='float32')
+
+ match_indices, match_dist = ops.bipartite_match(
+ x, match_type='per_prediction', dist_threshold=0.5)
+ match_indices_np, match_dist_np = self.get_static_graph_result(
+ feed={'x': distance, },
+ fetch_list=[match_indices, match_dist],
+ with_lod=False)
+
+ with self.dynamic_graph():
+ x_dy = base.to_variable(distance)
+
+ match_indices_dy, match_dist_dy = ops.bipartite_match(
+ x_dy, match_type='per_prediction', dist_threshold=0.5)
+ match_indices_dy_np = match_indices_dy.numpy()
+ match_dist_dy_np = match_dist_dy.numpy()
+
+ self.assertTrue(np.array_equal(match_indices_np, match_indices_dy_np))
+ self.assertTrue(np.array_equal(match_dist_np, match_dist_dy_np))
+
+
+class TestYoloBox(LayerTest):
+ def test_yolo_box(self):
+
+ # x shape [N C H W], C=K * (5 + class_num), class_num=10, K=2
+ np_x = np.random.random([1, 30, 7, 7]).astype('float32')
+ np_origin_shape = np.array([[608, 608]], dtype='int32')
+ class_num = 10
+ conf_thresh = 0.01
+ downsample_ratio = 32
+ scale_x_y = 1.2
+
+ # static
+ with self.static_graph():
+ # x shape [N C H W], C=K * (5 + class_num), class_num=10, K=2
+ x = paddle.static.data(
+ name='x', shape=[1, 30, 7, 7], dtype='float32')
+ origin_shape = paddle.static.data(
+ name='origin_shape', shape=[1, 2], dtype='int32')
+
+ boxes, scores = ops.yolo_box(
+ x,
+ origin_shape, [10, 13, 30, 13],
+ class_num,
+ conf_thresh,
+ downsample_ratio,
+ scale_x_y=scale_x_y)
+
+ boxes_np, scores_np = self.get_static_graph_result(
+ feed={
+ 'x': np_x,
+ 'origin_shape': np_origin_shape,
+ },
+ fetch_list=[boxes, scores],
+ with_lod=False)
+
+ # dygraph
+ with self.dynamic_graph():
+ x_dy = fluid.layers.assign(np_x)
+ origin_shape_dy = fluid.layers.assign(np_origin_shape)
+
+ boxes_dy, scores_dy = ops.yolo_box(
+ x_dy,
+ origin_shape_dy, [10, 13, 30, 13],
+ 10,
+ 0.01,
+ 32,
+ scale_x_y=scale_x_y)
+
+ boxes_dy_np = boxes_dy.numpy()
+ scores_dy_np = scores_dy.numpy()
+
+ self.assertTrue(np.array_equal(boxes_np, boxes_dy_np))
+ self.assertTrue(np.array_equal(scores_np, scores_dy_np))
+
+ def test_yolo_box_error(self):
+ with self.static_graph():
+ # x shape [N C H W], C=K * (5 + class_num), class_num=10, K=2
+ x = paddle.static.data(
+ name='x', shape=[1, 30, 7, 7], dtype='float32')
+ origin_shape = paddle.static.data(
+ name='origin_shape', shape=[1, 2], dtype='int32')
+
+ self.assertRaises(
+ TypeError,
+ ops.yolo_box,
+ x,
+ origin_shape, [10, 13, 30, 13],
+ 10.123,
+ 0.01,
+ 32,
+ scale_x_y=1.2)
+
+ paddle.disable_static()
+
+
+class TestPriorBox(LayerTest):
+ def test_prior_box(self):
+ input_np = np.random.rand(2, 10, 32, 32).astype('float32')
+ image_np = np.random.rand(2, 10, 40, 40).astype('float32')
+ min_sizes = [2, 4]
+ with self.static_graph():
+ input = paddle.static.data(
+ name='input', shape=[2, 10, 32, 32], dtype='float32')
+ image = paddle.static.data(
+ name='image', shape=[2, 10, 40, 40], dtype='float32')
+
+ box, var = ops.prior_box(
+ input=input,
+ image=image,
+ min_sizes=min_sizes,
+ clip=True,
+ flip=True)
+ box_np, var_np = self.get_static_graph_result(
+ feed={
+ 'input': input_np,
+ 'image': image_np,
+ },
+ fetch_list=[box, var],
+ with_lod=False)
+
+ with self.dynamic_graph():
+ inputs_dy = base.to_variable(input_np)
+ image_dy = base.to_variable(image_np)
+
+ box_dy, var_dy = ops.prior_box(
+ input=inputs_dy,
+ image=image_dy,
+ min_sizes=min_sizes,
+ clip=True,
+ flip=True)
+ box_dy_np = box_dy.numpy()
+ var_dy_np = var_dy.numpy()
+
+ self.assertTrue(np.array_equal(box_np, box_dy_np))
+ self.assertTrue(np.array_equal(var_np, var_dy_np))
+
+ def test_prior_box_error(self):
+ with self.static_graph():
+ input = paddle.static.data(
+ name='input', shape=[2, 10, 32, 32], dtype='int32')
+ image = paddle.static.data(
+ name='image', shape=[2, 10, 40, 40], dtype='int32')
+ self.assertRaises(
+ TypeError,
+ ops.prior_box,
+ input=input,
+ image=image,
+ min_sizes=[2, 4],
+ clip=True,
+ flip=True)
+
+ paddle.disable_static()
+
+
+class TestMulticlassNms(LayerTest):
+ def test_multiclass_nms(self):
+ boxes_np = np.random.rand(10, 81, 4).astype('float32')
+ scores_np = np.random.rand(10, 81).astype('float32')
+ rois_num_np = np.array([2, 8]).astype('int32')
+ with self.static_graph():
+ boxes = paddle.static.data(
+ name='bboxes',
+ shape=[None, 81, 4],
+ dtype='float32',
+ lod_level=1)
+ scores = paddle.static.data(
+ name='scores', shape=[None, 81], dtype='float32', lod_level=1)
+ rois_num = paddle.static.data(
+ name='rois_num', shape=[None], dtype='int32')
+
+ output = ops.multiclass_nms(
+ bboxes=boxes,
+ scores=scores,
+ background_label=0,
+ score_threshold=0.5,
+ nms_top_k=400,
+ nms_threshold=0.3,
+ keep_top_k=200,
+ normalized=False,
+ return_index=True,
+ rois_num=rois_num)
+ out_np, index_np, nms_rois_num_np = self.get_static_graph_result(
+ feed={
+ 'bboxes': boxes_np,
+ 'scores': scores_np,
+ 'rois_num': rois_num_np
+ },
+ fetch_list=output,
+ with_lod=True)
+ out_np = np.array(out_np)
+ index_np = np.array(index_np)
+ nms_rois_num_np = np.array(nms_rois_num_np)
+
+ with self.dynamic_graph():
+ boxes_dy = base.to_variable(boxes_np)
+ scores_dy = base.to_variable(scores_np)
+ rois_num_dy = base.to_variable(rois_num_np)
+
+ out_dy, index_dy, nms_rois_num_dy = ops.multiclass_nms(
+ bboxes=boxes_dy,
+ scores=scores_dy,
+ background_label=0,
+ score_threshold=0.5,
+ nms_top_k=400,
+ nms_threshold=0.3,
+ keep_top_k=200,
+ normalized=False,
+ return_index=True,
+ rois_num=rois_num_dy)
+ out_dy_np = out_dy.numpy()
+ index_dy_np = index_dy.numpy()
+ nms_rois_num_dy_np = nms_rois_num_dy.numpy()
+
+ self.assertTrue(np.array_equal(out_np, out_dy_np))
+ self.assertTrue(np.array_equal(index_np, index_dy_np))
+ self.assertTrue(np.array_equal(nms_rois_num_np, nms_rois_num_dy_np))
+
+ def test_multiclass_nms_error(self):
+ with self.static_graph():
+ boxes = paddle.static.data(
+ name='bboxes', shape=[81, 4], dtype='float32', lod_level=1)
+ scores = paddle.static.data(
+ name='scores', shape=[81], dtype='float32', lod_level=1)
+ rois_num = paddle.static.data(
+ name='rois_num', shape=[40, 41], dtype='int32')
+ self.assertRaises(
+ TypeError,
+ ops.multiclass_nms,
+ boxes=boxes,
+ scores=scores,
+ background_label=0,
+ score_threshold=0.5,
+ nms_top_k=400,
+ nms_threshold=0.3,
+ keep_top_k=200,
+ normalized=False,
+ return_index=True,
+ rois_num=rois_num)
+
+
+class TestMatrixNMS(LayerTest):
+ def test_matrix_nms(self):
+ N, M, C = 7, 1200, 21
+ BOX_SIZE = 4
+ nms_top_k = 400
+ keep_top_k = 200
+ score_threshold = 0.01
+ post_threshold = 0.
+
+ scores_np = np.random.random((N * M, C)).astype('float32')
+ scores_np = np.apply_along_axis(softmax, 1, scores_np)
+ scores_np = np.reshape(scores_np, (N, M, C))
+ scores_np = np.transpose(scores_np, (0, 2, 1))
+
+ boxes_np = np.random.random((N, M, BOX_SIZE)).astype('float32')
+ boxes_np[:, :, 0:2] = boxes_np[:, :, 0:2] * 0.5
+ boxes_np[:, :, 2:4] = boxes_np[:, :, 2:4] * 0.5 + 0.5
+
+ with self.static_graph():
+ boxes = paddle.static.data(
+ name='boxes', shape=[N, M, BOX_SIZE], dtype='float32')
+ scores = paddle.static.data(
+ name='scores', shape=[N, C, M], dtype='float32')
+ out, index, _ = ops.matrix_nms(
+ bboxes=boxes,
+ scores=scores,
+ score_threshold=score_threshold,
+ post_threshold=post_threshold,
+ nms_top_k=nms_top_k,
+ keep_top_k=keep_top_k,
+ return_index=True)
+ out_np, index_np = self.get_static_graph_result(
+ feed={'boxes': boxes_np,
+ 'scores': scores_np},
+ fetch_list=[out, index],
+ with_lod=True)
+
+ with self.dynamic_graph():
+ boxes_dy = base.to_variable(boxes_np)
+ scores_dy = base.to_variable(scores_np)
+
+ out_dy, index_dy, _ = ops.matrix_nms(
+ bboxes=boxes_dy,
+ scores=scores_dy,
+ score_threshold=score_threshold,
+ post_threshold=post_threshold,
+ nms_top_k=nms_top_k,
+ keep_top_k=keep_top_k,
+ return_index=True)
+ out_dy_np = out_dy.numpy()
+ index_dy_np = index_dy.numpy()
+
+ self.assertTrue(np.array_equal(out_np, out_dy_np))
+ self.assertTrue(np.array_equal(index_np, index_dy_np))
+
+ def test_matrix_nms_error(self):
+ with self.static_graph():
+ bboxes = paddle.static.data(
+ name='bboxes', shape=[7, 1200, 4], dtype='float32')
+ scores = paddle.static.data(
+ name='data_error', shape=[7, 21, 1200], dtype='int32')
+ self.assertRaises(
+ TypeError,
+ ops.matrix_nms,
+ bboxes=bboxes,
+ scores=scores,
+ score_threshold=0.01,
+ post_threshold=0.,
+ nms_top_k=400,
+ keep_top_k=200,
+ return_index=True)
+
+ paddle.disable_static()
+
+
+class TestBoxCoder(LayerTest):
+ def test_box_coder(self):
+
+ prior_box_np = np.random.random((81, 4)).astype('float32')
+ prior_box_var_np = np.random.random((81, 4)).astype('float32')
+ target_box_np = np.random.random((20, 81, 4)).astype('float32')
+
+ # static
+ with self.static_graph():
+ prior_box = paddle.static.data(
+ name='prior_box', shape=[81, 4], dtype='float32')
+ prior_box_var = paddle.static.data(
+ name='prior_box_var', shape=[81, 4], dtype='float32')
+ target_box = paddle.static.data(
+ name='target_box', shape=[20, 81, 4], dtype='float32')
+
+ boxes = ops.box_coder(
+ prior_box=prior_box,
+ prior_box_var=prior_box_var,
+ target_box=target_box,
+ code_type="decode_center_size",
+ box_normalized=False)
+
+ boxes_np, = self.get_static_graph_result(
+ feed={
+ 'prior_box': prior_box_np,
+ 'prior_box_var': prior_box_var_np,
+ 'target_box': target_box_np,
+ },
+ fetch_list=[boxes],
+ with_lod=False)
+
+ # dygraph
+ with self.dynamic_graph():
+ prior_box_dy = base.to_variable(prior_box_np)
+ prior_box_var_dy = base.to_variable(prior_box_var_np)
+ target_box_dy = base.to_variable(target_box_np)
+
+ boxes_dy = ops.box_coder(
+ prior_box=prior_box_dy,
+ prior_box_var=prior_box_var_dy,
+ target_box=target_box_dy,
+ code_type="decode_center_size",
+ box_normalized=False)
+
+ boxes_dy_np = boxes_dy.numpy()
+
+ self.assertTrue(np.array_equal(boxes_np, boxes_dy_np))
+
+ def test_box_coder_error(self):
+ with self.static_graph():
+ prior_box = paddle.static.data(
+ name='prior_box', shape=[81, 4], dtype='int32')
+ prior_box_var = paddle.static.data(
+ name='prior_box_var', shape=[81, 4], dtype='float32')
+ target_box = paddle.static.data(
+ name='target_box', shape=[20, 81, 4], dtype='float32')
+
+ self.assertRaises(TypeError, ops.box_coder, prior_box,
+ prior_box_var, target_box)
+
+ paddle.disable_static()
+
+
+class TestGenerateProposals(LayerTest):
+ def test_generate_proposals(self):
+ scores_np = np.random.rand(2, 3, 4, 4).astype('float32')
+ bbox_deltas_np = np.random.rand(2, 12, 4, 4).astype('float32')
+ im_shape_np = np.array([[8, 8], [6, 6]]).astype('float32')
+ anchors_np = np.reshape(np.arange(4 * 4 * 3 * 4),
+ [4, 4, 3, 4]).astype('float32')
+ variances_np = np.ones((4, 4, 3, 4)).astype('float32')
+
+ with self.static_graph():
+ scores = paddle.static.data(
+ name='scores', shape=[2, 3, 4, 4], dtype='float32')
+ bbox_deltas = paddle.static.data(
+ name='bbox_deltas', shape=[2, 12, 4, 4], dtype='float32')
+ im_shape = paddle.static.data(
+ name='im_shape', shape=[2, 2], dtype='float32')
+ anchors = paddle.static.data(
+ name='anchors', shape=[4, 4, 3, 4], dtype='float32')
+ variances = paddle.static.data(
+ name='var', shape=[4, 4, 3, 4], dtype='float32')
+ rois, roi_probs, rois_num = ops.generate_proposals(
+ scores,
+ bbox_deltas,
+ im_shape,
+ anchors,
+ variances,
+ pre_nms_top_n=10,
+ post_nms_top_n=5,
+ return_rois_num=True)
+ rois_stat, roi_probs_stat, rois_num_stat = self.get_static_graph_result(
+ feed={
+ 'scores': scores_np,
+ 'bbox_deltas': bbox_deltas_np,
+ 'im_shape': im_shape_np,
+ 'anchors': anchors_np,
+ 'var': variances_np
+ },
+ fetch_list=[rois, roi_probs, rois_num],
+ with_lod=True)
+
+ with self.dynamic_graph():
+ scores_dy = base.to_variable(scores_np)
+ bbox_deltas_dy = base.to_variable(bbox_deltas_np)
+ im_shape_dy = base.to_variable(im_shape_np)
+ anchors_dy = base.to_variable(anchors_np)
+ variances_dy = base.to_variable(variances_np)
+ rois, roi_probs, rois_num = ops.generate_proposals(
+ scores_dy,
+ bbox_deltas_dy,
+ im_shape_dy,
+ anchors_dy,
+ variances_dy,
+ pre_nms_top_n=10,
+ post_nms_top_n=5,
+ return_rois_num=True)
+ rois_dy = rois.numpy()
+ roi_probs_dy = roi_probs.numpy()
+ rois_num_dy = rois_num.numpy()
+
+ self.assertTrue(np.array_equal(np.array(rois_stat), rois_dy))
+ self.assertTrue(np.array_equal(np.array(roi_probs_stat), roi_probs_dy))
+ self.assertTrue(np.array_equal(np.array(rois_num_stat), rois_num_dy))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_yolov3_loss.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_yolov3_loss.py
new file mode 100644
index 000000000..cec8bc940
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/tests/test_yolov3_loss.py
@@ -0,0 +1,414 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import division
+
+import unittest
+
+import paddle
+from paddle import fluid
+# add python path of PaddleDetection to sys.path
+import os
+import sys
+parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 4)))
+if parent_path not in sys.path:
+ sys.path.append(parent_path)
+
+from ppdet.modeling.losses import YOLOv3Loss
+from ppdet.data.transform.op_helper import jaccard_overlap
+import numpy as np
+
+
+def _split_ioup(output, an_num, num_classes):
+ """
+    Split the output feature map into the predicted IoU and the remaining
+    output along the channel dimension.
+ """
+ ioup = fluid.layers.slice(output, axes=[1], starts=[0], ends=[an_num])
+ ioup = fluid.layers.sigmoid(ioup)
+ oriout = fluid.layers.slice(
+ output, axes=[1], starts=[an_num], ends=[an_num * (num_classes + 6)])
+ return (ioup, oriout)
+
+
+def _split_output(output, an_num, num_classes):
+ """
+    Split the output feature map into x, y, w, h, objectness and
+    classification along the channel dimension.
+ """
+ x = fluid.layers.strided_slice(
+ output,
+ axes=[1],
+ starts=[0],
+ ends=[output.shape[1]],
+ strides=[5 + num_classes])
+ y = fluid.layers.strided_slice(
+ output,
+ axes=[1],
+ starts=[1],
+ ends=[output.shape[1]],
+ strides=[5 + num_classes])
+ w = fluid.layers.strided_slice(
+ output,
+ axes=[1],
+ starts=[2],
+ ends=[output.shape[1]],
+ strides=[5 + num_classes])
+ h = fluid.layers.strided_slice(
+ output,
+ axes=[1],
+ starts=[3],
+ ends=[output.shape[1]],
+ strides=[5 + num_classes])
+ obj = fluid.layers.strided_slice(
+ output,
+ axes=[1],
+ starts=[4],
+ ends=[output.shape[1]],
+ strides=[5 + num_classes])
+ clss = []
+ stride = output.shape[1] // an_num
+ for m in range(an_num):
+ clss.append(
+ fluid.layers.slice(
+ output,
+ axes=[1],
+ starts=[stride * m + 5],
+ ends=[stride * m + 5 + num_classes]))
+ cls = fluid.layers.transpose(
+ fluid.layers.stack(
+ clss, axis=1), perm=[0, 1, 3, 4, 2])
+ return (x, y, w, h, obj, cls)
+
+
+def _split_target(target):
+ """
+    Split the target into x, y, w, h, scale, objectness and classification
+    along dimension 2.
+    The target has shape [N, an_num, 6 + class_num, H, W].
+ """
+ tx = target[:, :, 0, :, :]
+ ty = target[:, :, 1, :, :]
+ tw = target[:, :, 2, :, :]
+ th = target[:, :, 3, :, :]
+ tscale = target[:, :, 4, :, :]
+ tobj = target[:, :, 5, :, :]
+ tcls = fluid.layers.transpose(target[:, :, 6:, :, :], perm=[0, 1, 3, 4, 2])
+ tcls.stop_gradient = True
+ return (tx, ty, tw, th, tscale, tobj, tcls)
+
+
+def _calc_obj_loss(output, obj, tobj, gt_box, batch_size, anchors, num_classes,
+ downsample, ignore_thresh, scale_x_y):
+    # If a predicted bbox overlaps any gt bbox with IoU above ignore_thresh,
+    # its objectness loss is ignored. The procedure is as follows:
+    # 1. Get pred bboxes, the same as in YOLOv3 infer mode; use yolo_box here.
+    # NOTE: img_size is set to ones to get normalized pred bboxes.
+ bbox, prob = fluid.layers.yolo_box(
+ x=output,
+ img_size=fluid.layers.ones(
+ shape=[batch_size, 2], dtype="int32"),
+ anchors=anchors,
+ class_num=num_classes,
+ conf_thresh=0.,
+ downsample_ratio=downsample,
+ clip_bbox=False,
+ scale_x_y=scale_x_y)
+ # 2. split pred bbox and gt bbox by sample, calculate IoU between pred bbox
+ # and gt bbox in each sample
+ if batch_size > 1:
+ preds = fluid.layers.split(bbox, batch_size, dim=0)
+ gts = fluid.layers.split(gt_box, batch_size, dim=0)
+ else:
+ preds = [bbox]
+ gts = [gt_box]
+ probs = [prob]
+ ious = []
+ for pred, gt in zip(preds, gts):
+
+ def box_xywh2xyxy(box):
+ x = box[:, 0]
+ y = box[:, 1]
+ w = box[:, 2]
+ h = box[:, 3]
+ return fluid.layers.stack(
+ [
+ x - w / 2.,
+ y - h / 2.,
+ x + w / 2.,
+ y + h / 2.,
+ ], axis=1)
+
+ pred = fluid.layers.squeeze(pred, axes=[0])
+ gt = box_xywh2xyxy(fluid.layers.squeeze(gt, axes=[0]))
+ ious.append(fluid.layers.iou_similarity(pred, gt))
+ iou = fluid.layers.stack(ious, axis=0)
+    # 3. Get iou_mask from the IoU between gt bboxes and predicted bboxes,
+    # get obj_mask from tobj (which holds gt_score), then calculate the
+    # objectness loss.
+ max_iou = fluid.layers.reduce_max(iou, dim=-1)
+ iou_mask = fluid.layers.cast(max_iou <= ignore_thresh, dtype="float32")
+ output_shape = fluid.layers.shape(output)
+ an_num = len(anchors) // 2
+ iou_mask = fluid.layers.reshape(iou_mask, (-1, an_num, output_shape[2],
+ output_shape[3]))
+ iou_mask.stop_gradient = True
+ # NOTE: tobj holds gt_score, obj_mask holds object existence mask
+ obj_mask = fluid.layers.cast(tobj > 0., dtype="float32")
+ obj_mask.stop_gradient = True
+    # For positive objectness grids, the objectness loss is always calculated;
+    # for negative grids it is calculated only where iou_mask == 1.0.
+ loss_obj = fluid.layers.sigmoid_cross_entropy_with_logits(obj, obj_mask)
+ loss_obj_pos = fluid.layers.reduce_sum(loss_obj * tobj, dim=[1, 2, 3])
+ loss_obj_neg = fluid.layers.reduce_sum(
+ loss_obj * (1.0 - obj_mask) * iou_mask, dim=[1, 2, 3])
+ return loss_obj_pos, loss_obj_neg
+
+
+def fine_grained_loss(output,
+ target,
+ gt_box,
+ batch_size,
+ num_classes,
+ anchors,
+ ignore_thresh,
+ downsample,
+ scale_x_y=1.,
+ eps=1e-10):
+ an_num = len(anchors) // 2
+ x, y, w, h, obj, cls = _split_output(output, an_num, num_classes)
+ tx, ty, tw, th, tscale, tobj, tcls = _split_target(target)
+
+ tscale_tobj = tscale * tobj
+
+ if (abs(scale_x_y - 1.0) < eps):
+ loss_x = fluid.layers.sigmoid_cross_entropy_with_logits(
+ x, tx) * tscale_tobj
+ loss_x = fluid.layers.reduce_sum(loss_x, dim=[1, 2, 3])
+ loss_y = fluid.layers.sigmoid_cross_entropy_with_logits(
+ y, ty) * tscale_tobj
+ loss_y = fluid.layers.reduce_sum(loss_y, dim=[1, 2, 3])
+ else:
+ dx = scale_x_y * fluid.layers.sigmoid(x) - 0.5 * (scale_x_y - 1.0)
+ dy = scale_x_y * fluid.layers.sigmoid(y) - 0.5 * (scale_x_y - 1.0)
+ loss_x = fluid.layers.abs(dx - tx) * tscale_tobj
+ loss_x = fluid.layers.reduce_sum(loss_x, dim=[1, 2, 3])
+ loss_y = fluid.layers.abs(dy - ty) * tscale_tobj
+ loss_y = fluid.layers.reduce_sum(loss_y, dim=[1, 2, 3])
+
+    # NOTE: we refined the loss function of (w, h) to an L1 loss
+ loss_w = fluid.layers.abs(w - tw) * tscale_tobj
+ loss_w = fluid.layers.reduce_sum(loss_w, dim=[1, 2, 3])
+ loss_h = fluid.layers.abs(h - th) * tscale_tobj
+ loss_h = fluid.layers.reduce_sum(loss_h, dim=[1, 2, 3])
+
+ loss_obj_pos, loss_obj_neg = _calc_obj_loss(
+ output, obj, tobj, gt_box, batch_size, anchors, num_classes, downsample,
+ ignore_thresh, scale_x_y)
+
+ loss_cls = fluid.layers.sigmoid_cross_entropy_with_logits(cls, tcls)
+ loss_cls = fluid.layers.elementwise_mul(loss_cls, tobj, axis=0)
+ loss_cls = fluid.layers.reduce_sum(loss_cls, dim=[1, 2, 3, 4])
+
+ loss_xys = fluid.layers.reduce_mean(loss_x + loss_y)
+ loss_whs = fluid.layers.reduce_mean(loss_w + loss_h)
+ loss_objs = fluid.layers.reduce_mean(loss_obj_pos + loss_obj_neg)
+ loss_clss = fluid.layers.reduce_mean(loss_cls)
+
+ losses_all = {
+ "loss_xy": fluid.layers.sum(loss_xys),
+ "loss_wh": fluid.layers.sum(loss_whs),
+ "loss_loc": fluid.layers.sum(loss_xys) + fluid.layers.sum(loss_whs),
+ "loss_obj": fluid.layers.sum(loss_objs),
+ "loss_cls": fluid.layers.sum(loss_clss),
+ }
+ return losses_all, x, y, tx, ty
+
+
+def gt2yolotarget(gt_bbox, gt_class, gt_score, anchors, mask, num_classes, size,
+ stride):
+ grid_h, grid_w = size
+ h, w = grid_h * stride, grid_w * stride
+ an_hw = np.array(anchors) / np.array([[w, h]])
+ target = np.zeros(
+ (len(mask), 6 + num_classes, grid_h, grid_w), dtype=np.float32)
+ for b in range(gt_bbox.shape[0]):
+ gx, gy, gw, gh = gt_bbox[b, :]
+ cls = gt_class[b]
+ score = gt_score[b]
+ if gw <= 0. or gh <= 0. or score <= 0.:
+ continue
+
+ # find best match anchor index
+ best_iou = 0.
+ best_idx = -1
+ for an_idx in range(an_hw.shape[0]):
+ iou = jaccard_overlap([0., 0., gw, gh],
+ [0., 0., an_hw[an_idx, 0], an_hw[an_idx, 1]])
+ if iou > best_iou:
+ best_iou = iou
+ best_idx = an_idx
+
+ gi = int(gx * grid_w)
+ gj = int(gy * grid_h)
+
+        # The gt box should be regressed in this layer if its best-match
+        # anchor index is in this layer's anchor mask.
+ if best_idx in mask:
+ best_n = mask.index(best_idx)
+
+ # x, y, w, h, scale
+ target[best_n, 0, gj, gi] = gx * grid_w - gi
+ target[best_n, 1, gj, gi] = gy * grid_h - gj
+ target[best_n, 2, gj, gi] = np.log(gw * w / anchors[best_idx][0])
+ target[best_n, 3, gj, gi] = np.log(gh * h / anchors[best_idx][1])
+ target[best_n, 4, gj, gi] = 2.0 - gw * gh
+
+            # objectness records gt_score
+ # if target[best_n, 5, gj, gi] > 0:
+ # print('find 1 duplicate')
+ target[best_n, 5, gj, gi] = score
+
+ # classification
+ target[best_n, 6 + cls, gj, gi] = 1.
+
+ return target
+
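+
+# Illustrative usage sketch for gt2yolotarget (hypothetical values; relies on
+# jaccard_overlap defined earlier in this file and is not used by the tests):
+def _gt2yolotarget_example():
+    # one normalized cxcywh box encoded into a 13x13 grid at stride 32
+    gt_bbox = np.array([[0.5, 0.5, 0.2, 0.3]], dtype=np.float32)
+    gt_class = np.array([1], dtype=np.int64)
+    gt_score = np.array([1.0], dtype=np.float32)
+    anchors = [[10, 13], [16, 30], [33, 23]]
+    target = gt2yolotarget(gt_bbox, gt_class, gt_score, anchors,
+                           mask=[0, 1, 2], num_classes=3, size=(13, 13),
+                           stride=32)
+    # target shape: (3, 6 + 3, 13, 13); channel 4 holds the 2 - w*h scale,
+    # channel 5 the objectness score, channels 6+ the one-hot class
+    return target
+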
+
+class TestYolov3LossOp(unittest.TestCase):
+ def setUp(self):
+ self.initTestCase()
+ x = np.random.uniform(0, 1, self.x_shape).astype('float64')
+ gtbox = np.random.random(size=self.gtbox_shape).astype('float64')
+ gtlabel = np.random.randint(0, self.class_num, self.gtbox_shape[:2])
+ gtmask = np.random.randint(0, 2, self.gtbox_shape[:2])
+ gtbox = gtbox * gtmask[:, :, np.newaxis]
+ gtlabel = gtlabel * gtmask
+
+ gtscore = np.ones(self.gtbox_shape[:2]).astype('float64')
+ if self.gtscore:
+ gtscore = np.random.random(self.gtbox_shape[:2]).astype('float64')
+
+ target = []
+ for box, label, score in zip(gtbox, gtlabel, gtscore):
+            target.append(
+                gt2yolotarget(box, label, score, self.anchors,
+                              self.anchor_mask, self.class_num,
+                              (self.h, self.w), self.downsample_ratio))
+
+ self.target = np.array(target).astype('float64')
+
+ self.mask_anchors = []
+ for i in self.anchor_mask:
+ self.mask_anchors.extend(self.anchors[i])
+ self.x = x
+ self.gtbox = gtbox
+ self.gtlabel = gtlabel
+ self.gtscore = gtscore
+
+ def initTestCase(self):
+ self.b = 8
+ self.h = 19
+ self.w = 19
+ self.anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
+ [59, 119], [116, 90], [156, 198], [373, 326]]
+ self.anchor_mask = [6, 7, 8]
+ self.na = len(self.anchor_mask)
+ self.class_num = 80
+ self.ignore_thresh = 0.7
+ self.downsample_ratio = 32
+ self.x_shape = (self.b, len(self.anchor_mask) * (5 + self.class_num),
+ self.h, self.w)
+ self.gtbox_shape = (self.b, 40, 4)
+ self.gtscore = True
+ self.use_label_smooth = False
+ self.scale_x_y = 1.
+
+ def test_loss(self):
+ x, gtbox, gtlabel, gtscore, target = self.x, self.gtbox, self.gtlabel, self.gtscore, self.target
+ yolo_loss = YOLOv3Loss(
+ ignore_thresh=self.ignore_thresh,
+ label_smooth=self.use_label_smooth,
+ num_classes=self.class_num,
+ downsample=self.downsample_ratio,
+ scale_x_y=self.scale_x_y)
+ x = paddle.to_tensor(x.astype(np.float32))
+ gtbox = paddle.to_tensor(gtbox.astype(np.float32))
+ gtlabel = paddle.to_tensor(gtlabel.astype(np.float32))
+ gtscore = paddle.to_tensor(gtscore.astype(np.float32))
+ t = paddle.to_tensor(target.astype(np.float32))
+ anchor = [self.anchors[i] for i in self.anchor_mask]
+ (yolo_loss1, px, py, tx, ty) = fine_grained_loss(
+ output=x,
+ target=t,
+ gt_box=gtbox,
+ batch_size=self.b,
+ num_classes=self.class_num,
+ anchors=self.mask_anchors,
+ ignore_thresh=self.ignore_thresh,
+ downsample=self.downsample_ratio,
+ scale_x_y=self.scale_x_y)
+ yolo_loss2 = yolo_loss.yolov3_loss(
+ x, t, gtbox, anchor, self.downsample_ratio, self.scale_x_y)
+ for k in yolo_loss2:
+ self.assertAlmostEqual(
+ yolo_loss1[k].numpy()[0],
+ yolo_loss2[k].numpy()[0],
+ delta=1e-2,
+ msg=k)
+
+
+class TestYolov3LossNoGTScore(TestYolov3LossOp):
+ def initTestCase(self):
+ self.b = 1
+ self.h = 76
+ self.w = 76
+ self.anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
+ [59, 119], [116, 90], [156, 198], [373, 326]]
+ self.anchor_mask = [0, 1, 2]
+ self.na = len(self.anchor_mask)
+ self.class_num = 80
+ self.ignore_thresh = 0.7
+ self.downsample_ratio = 8
+ self.x_shape = (self.b, len(self.anchor_mask) * (5 + self.class_num),
+ self.h, self.w)
+ self.gtbox_shape = (self.b, 40, 4)
+ self.gtscore = False
+ self.use_label_smooth = False
+ self.scale_x_y = 1.
+
+
+class TestYolov3LossWithScaleXY(TestYolov3LossOp):
+ def initTestCase(self):
+ self.b = 5
+ self.h = 38
+ self.w = 38
+ self.anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
+ [59, 119], [116, 90], [156, 198], [373, 326]]
+ self.anchor_mask = [3, 4, 5]
+ self.na = len(self.anchor_mask)
+ self.class_num = 80
+ self.ignore_thresh = 0.7
+ self.downsample_ratio = 16
+ self.x_shape = (self.b, len(self.anchor_mask) * (5 + self.class_num),
+ self.h, self.w)
+ self.gtbox_shape = (self.b, 40, 4)
+ self.gtscore = True
+ self.use_label_smooth = False
+ self.scale_x_y = 1.2
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__init__.py
new file mode 100644
index 000000000..4aed815d7
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import detr_transformer
+from . import utils
+from . import matchers
+from . import position_encoding
+from . import deformable_transformer
+
+from .detr_transformer import *
+from .utils import *
+from .matchers import *
+from .position_encoding import *
+from .deformable_transformer import *
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..fae862b10
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/deformable_transformer.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/deformable_transformer.cpython-37.pyc
new file mode 100644
index 000000000..75ee28c4b
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/deformable_transformer.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/detr_transformer.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/detr_transformer.cpython-37.pyc
new file mode 100644
index 000000000..11bce7aef
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/detr_transformer.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/matchers.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/matchers.cpython-37.pyc
new file mode 100644
index 000000000..fc05ed243
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/matchers.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/position_encoding.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/position_encoding.cpython-37.pyc
new file mode 100644
index 000000000..195c55c50
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/position_encoding.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/utils.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 000000000..f243af8b9
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/__pycache__/utils.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/deformable_transformer.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/deformable_transformer.py
new file mode 100644
index 000000000..0c2089a8b
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/deformable_transformer.py
@@ -0,0 +1,517 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modified from Deformable-DETR (https://github.com/fundamentalvision/Deformable-DETR)
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+
+from ppdet.core.workspace import register
+from ..layers import MultiHeadAttention
+from .position_encoding import PositionEmbedding
+from .utils import _get_clones, deformable_attention_core_func
+from ..initializer import linear_init_, constant_, xavier_uniform_, normal_
+
+__all__ = ['DeformableTransformer']
+
+
+class MSDeformableAttention(nn.Layer):
+ def __init__(self,
+ embed_dim=256,
+ num_heads=8,
+ num_levels=4,
+ num_points=4,
+ lr_mult=0.1):
+ """
+ Multi-Scale Deformable Attention Module
+ """
+ super(MSDeformableAttention, self).__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.num_levels = num_levels
+ self.num_points = num_points
+ self.total_points = num_heads * num_levels * num_points
+
+ self.head_dim = embed_dim // num_heads
+ assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
+
+ self.sampling_offsets = nn.Linear(
+ embed_dim,
+ self.total_points * 2,
+ weight_attr=ParamAttr(learning_rate=lr_mult),
+ bias_attr=ParamAttr(learning_rate=lr_mult))
+
+ self.attention_weights = nn.Linear(embed_dim, self.total_points)
+ self.value_proj = nn.Linear(embed_dim, embed_dim)
+ self.output_proj = nn.Linear(embed_dim, embed_dim)
+
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ # sampling_offsets
+ constant_(self.sampling_offsets.weight)
+ thetas = paddle.arange(
+ self.num_heads,
+ dtype=paddle.float32) * (2.0 * math.pi / self.num_heads)
+ grid_init = paddle.stack([thetas.cos(), thetas.sin()], -1)
+ grid_init = grid_init / grid_init.abs().max(-1, keepdim=True)
+ grid_init = grid_init.reshape([self.num_heads, 1, 1, 2]).tile(
+ [1, self.num_levels, self.num_points, 1])
+ scaling = paddle.arange(
+ 1, self.num_points + 1,
+ dtype=paddle.float32).reshape([1, 1, -1, 1])
+ grid_init *= scaling
+ self.sampling_offsets.bias.set_value(grid_init.flatten())
+ # attention_weights
+ constant_(self.attention_weights.weight)
+ constant_(self.attention_weights.bias)
+ # proj
+ xavier_uniform_(self.value_proj.weight)
+ constant_(self.value_proj.bias)
+ xavier_uniform_(self.output_proj.weight)
+ constant_(self.output_proj.bias)
+
+ def forward(self,
+ query,
+ reference_points,
+ value,
+ value_spatial_shapes,
+ value_mask=None):
+ """
+ Args:
+ query (Tensor): [bs, query_length, C]
+ reference_points (Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),
+ bottom-right (1, 1), including padding area
+ value (Tensor): [bs, value_length, C]
+ value_spatial_shapes (Tensor): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
+ value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements
+
+ Returns:
+ output (Tensor): [bs, Length_{query}, C]
+ """
+ bs, Len_q = query.shape[:2]
+ Len_v = value.shape[1]
+ assert int(value_spatial_shapes.prod(1).sum()) == Len_v
+
+ value = self.value_proj(value)
+ if value_mask is not None:
+ value_mask = value_mask.astype(value.dtype).unsqueeze(-1)
+ value *= value_mask
+ value = value.reshape([bs, Len_v, self.num_heads, self.head_dim])
+
+ sampling_offsets = self.sampling_offsets(query).reshape(
+ [bs, Len_q, self.num_heads, self.num_levels, self.num_points, 2])
+ attention_weights = self.attention_weights(query).reshape(
+ [bs, Len_q, self.num_heads, self.num_levels * self.num_points])
+ attention_weights = F.softmax(attention_weights, -1).reshape(
+ [bs, Len_q, self.num_heads, self.num_levels, self.num_points])
+
+ offset_normalizer = value_spatial_shapes.flip([1]).reshape(
+ [1, 1, 1, self.num_levels, 1, 2])
+ sampling_locations = reference_points.reshape([
+ bs, Len_q, 1, self.num_levels, 1, 2
+ ]) + sampling_offsets / offset_normalizer
+
+ output = deformable_attention_core_func(
+ value, value_spatial_shapes, sampling_locations, attention_weights)
+ output = self.output_proj(output)
+
+ return output
+
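+
+# Illustrative usage sketch (hypothetical shapes, single 8x8 feature level,
+# so value_length == 64; not called anywhere in this module):
+def _ms_deformable_attention_example():
+    attn = MSDeformableAttention(embed_dim=256, num_heads=8, num_levels=1)
+    bs, len_q = 2, 10
+    query = paddle.rand([bs, len_q, 256])
+    value = paddle.rand([bs, 64, 256])
+    spatial_shapes = paddle.to_tensor([[8, 8]], dtype='int64')
+    # normalized (x, y) reference point per query and per level
+    reference_points = paddle.rand([bs, len_q, 1, 2])
+    return attn(query, reference_points, value, spatial_shapes)  # [2, 10, 256]
+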
+
+class DeformableTransformerEncoderLayer(nn.Layer):
+ def __init__(self,
+ d_model=256,
+ n_head=8,
+ dim_feedforward=1024,
+ dropout=0.1,
+ activation="relu",
+ n_levels=4,
+ n_points=4,
+ weight_attr=None,
+ bias_attr=None):
+ super(DeformableTransformerEncoderLayer, self).__init__()
+ # self attention
+ self.self_attn = MSDeformableAttention(d_model, n_head, n_levels,
+ n_points)
+ self.dropout1 = nn.Dropout(dropout)
+ self.norm1 = nn.LayerNorm(d_model)
+ # ffn
+ self.linear1 = nn.Linear(d_model, dim_feedforward, weight_attr,
+ bias_attr)
+ self.activation = getattr(F, activation)
+ self.dropout2 = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(dim_feedforward, d_model, weight_attr,
+ bias_attr)
+ self.dropout3 = nn.Dropout(dropout)
+ self.norm2 = nn.LayerNorm(d_model)
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ linear_init_(self.linear1)
+ linear_init_(self.linear2)
+ xavier_uniform_(self.linear1.weight)
+ xavier_uniform_(self.linear2.weight)
+
+ def with_pos_embed(self, tensor, pos):
+ return tensor if pos is None else tensor + pos
+
+ def forward_ffn(self, src):
+ src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
+ src = src + self.dropout3(src2)
+ src = self.norm2(src)
+ return src
+
+ def forward(self,
+ src,
+ reference_points,
+ spatial_shapes,
+ src_mask=None,
+ pos_embed=None):
+ # self attention
+ src2 = self.self_attn(
+ self.with_pos_embed(src, pos_embed), reference_points, src,
+ spatial_shapes, src_mask)
+ src = src + self.dropout1(src2)
+ src = self.norm1(src)
+ # ffn
+ src = self.forward_ffn(src)
+
+ return src
+
+
+class DeformableTransformerEncoder(nn.Layer):
+ def __init__(self, encoder_layer, num_layers):
+ super(DeformableTransformerEncoder, self).__init__()
+ self.layers = _get_clones(encoder_layer, num_layers)
+ self.num_layers = num_layers
+
+ @staticmethod
+ def get_reference_points(spatial_shapes, valid_ratios):
+ valid_ratios = valid_ratios.unsqueeze(1)
+ reference_points = []
+ for i, (H, W) in enumerate(spatial_shapes.tolist()):
+ ref_y, ref_x = paddle.meshgrid(
+ paddle.linspace(0.5, H - 0.5, H),
+ paddle.linspace(0.5, W - 0.5, W))
+ ref_y = ref_y.flatten().unsqueeze(0) / (valid_ratios[:, :, i, 1] *
+ H)
+ ref_x = ref_x.flatten().unsqueeze(0) / (valid_ratios[:, :, i, 0] *
+ W)
+ reference_points.append(paddle.stack((ref_x, ref_y), axis=-1))
+ reference_points = paddle.concat(reference_points, 1).unsqueeze(2)
+ reference_points = reference_points * valid_ratios
+ return reference_points
+
+ def forward(self,
+ src,
+ spatial_shapes,
+ src_mask=None,
+ pos_embed=None,
+ valid_ratios=None):
+ output = src
+ if valid_ratios is None:
+ valid_ratios = paddle.ones(
+ [src.shape[0], spatial_shapes.shape[0], 2])
+ reference_points = self.get_reference_points(spatial_shapes,
+ valid_ratios)
+ for layer in self.layers:
+ output = layer(output, reference_points, spatial_shapes, src_mask,
+ pos_embed)
+
+ return output
+
+
+class DeformableTransformerDecoderLayer(nn.Layer):
+ def __init__(self,
+ d_model=256,
+ n_head=8,
+ dim_feedforward=1024,
+ dropout=0.1,
+ activation="relu",
+ n_levels=4,
+ n_points=4,
+ weight_attr=None,
+ bias_attr=None):
+ super(DeformableTransformerDecoderLayer, self).__init__()
+
+ # self attention
+ self.self_attn = MultiHeadAttention(d_model, n_head, dropout=dropout)
+ self.dropout1 = nn.Dropout(dropout)
+ self.norm1 = nn.LayerNorm(d_model)
+
+ # cross attention
+ self.cross_attn = MSDeformableAttention(d_model, n_head, n_levels,
+ n_points)
+ self.dropout2 = nn.Dropout(dropout)
+ self.norm2 = nn.LayerNorm(d_model)
+
+ # ffn
+ self.linear1 = nn.Linear(d_model, dim_feedforward, weight_attr,
+ bias_attr)
+ self.activation = getattr(F, activation)
+ self.dropout3 = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(dim_feedforward, d_model, weight_attr,
+ bias_attr)
+ self.dropout4 = nn.Dropout(dropout)
+ self.norm3 = nn.LayerNorm(d_model)
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ linear_init_(self.linear1)
+ linear_init_(self.linear2)
+ xavier_uniform_(self.linear1.weight)
+ xavier_uniform_(self.linear2.weight)
+
+ def with_pos_embed(self, tensor, pos):
+ return tensor if pos is None else tensor + pos
+
+ def forward_ffn(self, tgt):
+ tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
+ tgt = tgt + self.dropout4(tgt2)
+ tgt = self.norm3(tgt)
+ return tgt
+
+ def forward(self,
+ tgt,
+ reference_points,
+ memory,
+ memory_spatial_shapes,
+ memory_mask=None,
+ query_pos_embed=None):
+ # self attention
+ q = k = self.with_pos_embed(tgt, query_pos_embed)
+ tgt2 = self.self_attn(q, k, value=tgt)
+ tgt = tgt + self.dropout1(tgt2)
+ tgt = self.norm1(tgt)
+
+ # cross attention
+ tgt2 = self.cross_attn(
+ self.with_pos_embed(tgt, query_pos_embed), reference_points, memory,
+ memory_spatial_shapes, memory_mask)
+ tgt = tgt + self.dropout2(tgt2)
+ tgt = self.norm2(tgt)
+
+ # ffn
+ tgt = self.forward_ffn(tgt)
+
+ return tgt
+
+
+class DeformableTransformerDecoder(nn.Layer):
+ def __init__(self, decoder_layer, num_layers, return_intermediate=False):
+ super(DeformableTransformerDecoder, self).__init__()
+ self.layers = _get_clones(decoder_layer, num_layers)
+ self.num_layers = num_layers
+ self.return_intermediate = return_intermediate
+
+ def forward(self,
+ tgt,
+ reference_points,
+ memory,
+ memory_spatial_shapes,
+ memory_mask=None,
+ query_pos_embed=None):
+ output = tgt
+ intermediate = []
+ for lid, layer in enumerate(self.layers):
+ output = layer(output, reference_points, memory,
+ memory_spatial_shapes, memory_mask, query_pos_embed)
+
+ if self.return_intermediate:
+ intermediate.append(output)
+
+ if self.return_intermediate:
+ return paddle.stack(intermediate)
+
+ return output.unsqueeze(0)
+
+
+@register
+class DeformableTransformer(nn.Layer):
+ __shared__ = ['hidden_dim']
+
+ def __init__(self,
+ num_queries=300,
+ position_embed_type='sine',
+ return_intermediate_dec=True,
+ backbone_num_channels=[512, 1024, 2048],
+ num_feature_levels=4,
+ num_encoder_points=4,
+ num_decoder_points=4,
+ hidden_dim=256,
+ nhead=8,
+ num_encoder_layers=6,
+ num_decoder_layers=6,
+ dim_feedforward=1024,
+ dropout=0.1,
+ activation="relu",
+ lr_mult=0.1,
+ weight_attr=None,
+ bias_attr=None):
+ super(DeformableTransformer, self).__init__()
+ assert position_embed_type in ['sine', 'learned'], \
+ f'ValueError: position_embed_type not supported {position_embed_type}!'
+ assert len(backbone_num_channels) <= num_feature_levels
+
+ self.hidden_dim = hidden_dim
+ self.nhead = nhead
+ self.num_feature_levels = num_feature_levels
+
+ encoder_layer = DeformableTransformerEncoderLayer(
+ hidden_dim, nhead, dim_feedforward, dropout, activation,
+ num_feature_levels, num_encoder_points, weight_attr, bias_attr)
+ self.encoder = DeformableTransformerEncoder(encoder_layer,
+ num_encoder_layers)
+
+ decoder_layer = DeformableTransformerDecoderLayer(
+ hidden_dim, nhead, dim_feedforward, dropout, activation,
+ num_feature_levels, num_decoder_points, weight_attr, bias_attr)
+ self.decoder = DeformableTransformerDecoder(
+ decoder_layer, num_decoder_layers, return_intermediate_dec)
+
+ self.level_embed = nn.Embedding(num_feature_levels, hidden_dim)
+ self.tgt_embed = nn.Embedding(num_queries, hidden_dim)
+ self.query_pos_embed = nn.Embedding(num_queries, hidden_dim)
+
+ self.reference_points = nn.Linear(
+ hidden_dim,
+ 2,
+ weight_attr=ParamAttr(learning_rate=lr_mult),
+ bias_attr=ParamAttr(learning_rate=lr_mult))
+
+ self.input_proj = nn.LayerList()
+ for in_channels in backbone_num_channels:
+ self.input_proj.append(
+ nn.Sequential(
+ nn.Conv2D(
+ in_channels,
+ hidden_dim,
+ kernel_size=1,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr),
+ nn.GroupNorm(32, hidden_dim)))
+ in_channels = backbone_num_channels[-1]
+ for _ in range(num_feature_levels - len(backbone_num_channels)):
+ self.input_proj.append(
+ nn.Sequential(
+ nn.Conv2D(
+ in_channels,
+ hidden_dim,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr),
+ nn.GroupNorm(32, hidden_dim)))
+ in_channels = hidden_dim
+
+ self.position_embedding = PositionEmbedding(
+ hidden_dim // 2,
+ normalize=True if position_embed_type == 'sine' else False,
+ embed_type=position_embed_type,
+ offset=-0.5)
+
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ normal_(self.level_embed.weight)
+ normal_(self.tgt_embed.weight)
+ normal_(self.query_pos_embed.weight)
+ xavier_uniform_(self.reference_points.weight)
+ constant_(self.reference_points.bias)
+ for l in self.input_proj:
+ xavier_uniform_(l[0].weight)
+ constant_(l[0].bias)
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {'backbone_num_channels': [i.channels for i in input_shape], }
+
+ def _get_valid_ratio(self, mask):
+ mask = mask.astype(paddle.float32)
+ _, H, W = mask.shape
+ valid_ratio_h = paddle.sum(mask[:, :, 0], 1) / H
+ valid_ratio_w = paddle.sum(mask[:, 0, :], 1) / W
+ valid_ratio = paddle.stack([valid_ratio_w, valid_ratio_h], -1)
+ return valid_ratio
+
+ def forward(self, src_feats, src_mask=None):
+ srcs = []
+ for i in range(len(src_feats)):
+ srcs.append(self.input_proj[i](src_feats[i]))
+ if self.num_feature_levels > len(srcs):
+ len_srcs = len(srcs)
+ for i in range(len_srcs, self.num_feature_levels):
+ if i == len_srcs:
+ srcs.append(self.input_proj[i](src_feats[-1]))
+ else:
+ srcs.append(self.input_proj[i](srcs[-1]))
+ src_flatten = []
+ mask_flatten = []
+ lvl_pos_embed_flatten = []
+ spatial_shapes = []
+ valid_ratios = []
+ for level, src in enumerate(srcs):
+ bs, c, h, w = src.shape
+ spatial_shapes.append([h, w])
+ src = src.flatten(2).transpose([0, 2, 1])
+ src_flatten.append(src)
+ if src_mask is not None:
+ mask = F.interpolate(
+ src_mask.unsqueeze(0).astype(src.dtype),
+ size=(h, w))[0].astype('bool')
+ else:
+ mask = paddle.ones([bs, h, w], dtype='bool')
+ valid_ratios.append(self._get_valid_ratio(mask))
+ pos_embed = self.position_embedding(mask).flatten(2).transpose(
+ [0, 2, 1])
+ lvl_pos_embed = pos_embed + self.level_embed.weight[level].reshape(
+ [1, 1, -1])
+ lvl_pos_embed_flatten.append(lvl_pos_embed)
+ mask = mask.astype(src.dtype).flatten(1)
+ mask_flatten.append(mask)
+ src_flatten = paddle.concat(src_flatten, 1)
+ mask_flatten = paddle.concat(mask_flatten, 1)
+ lvl_pos_embed_flatten = paddle.concat(lvl_pos_embed_flatten, 1)
+ # [l, 2]
+ spatial_shapes = paddle.to_tensor(spatial_shapes, dtype='int64')
+ # [b, l, 2]
+ valid_ratios = paddle.stack(valid_ratios, 1)
+
+ # encoder
+ memory = self.encoder(src_flatten, spatial_shapes, mask_flatten,
+ lvl_pos_embed_flatten, valid_ratios)
+
+ # prepare input for decoder
+ bs, _, c = memory.shape
+ query_embed = self.query_pos_embed.weight.unsqueeze(0).tile([bs, 1, 1])
+ tgt = self.tgt_embed.weight.unsqueeze(0).tile([bs, 1, 1])
+ reference_points = F.sigmoid(self.reference_points(query_embed))
+ reference_points_input = reference_points.unsqueeze(
+ 2) * valid_ratios.unsqueeze(1)
+
+ # decoder
+ hs = self.decoder(tgt, reference_points_input, memory, spatial_shapes,
+ mask_flatten, query_embed)
+
+ return (hs, memory, reference_points)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/detr_transformer.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/detr_transformer.py
new file mode 100644
index 000000000..bd513772d
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/detr_transformer.py
@@ -0,0 +1,353 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modified from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from ppdet.core.workspace import register
+from ..layers import MultiHeadAttention, _convert_attention_mask
+from .position_encoding import PositionEmbedding
+from .utils import _get_clones
+from ..initializer import linear_init_, conv_init_, xavier_uniform_, normal_
+
+__all__ = ['DETRTransformer']
+
+
+class TransformerEncoderLayer(nn.Layer):
+ def __init__(self,
+ d_model,
+ nhead,
+ dim_feedforward=2048,
+ dropout=0.1,
+ activation="relu",
+ attn_dropout=None,
+ act_dropout=None,
+ normalize_before=False):
+ super(TransformerEncoderLayer, self).__init__()
+ attn_dropout = dropout if attn_dropout is None else attn_dropout
+ act_dropout = dropout if act_dropout is None else act_dropout
+ self.normalize_before = normalize_before
+
+ self.self_attn = MultiHeadAttention(d_model, nhead, attn_dropout)
+ # Implementation of Feedforward model
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
+ self.dropout = nn.Dropout(act_dropout, mode="upscale_in_train")
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+ self.norm1 = nn.LayerNorm(d_model)
+ self.norm2 = nn.LayerNorm(d_model)
+ self.dropout1 = nn.Dropout(dropout, mode="upscale_in_train")
+ self.dropout2 = nn.Dropout(dropout, mode="upscale_in_train")
+ self.activation = getattr(F, activation)
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ linear_init_(self.linear1)
+ linear_init_(self.linear2)
+
+ @staticmethod
+ def with_pos_embed(tensor, pos_embed):
+ return tensor if pos_embed is None else tensor + pos_embed
+
+ def forward(self, src, src_mask=None, pos_embed=None):
+ src_mask = _convert_attention_mask(src_mask, src.dtype)
+
+ residual = src
+ if self.normalize_before:
+ src = self.norm1(src)
+ q = k = self.with_pos_embed(src, pos_embed)
+ src = self.self_attn(q, k, value=src, attn_mask=src_mask)
+
+ src = residual + self.dropout1(src)
+ if not self.normalize_before:
+ src = self.norm1(src)
+
+ residual = src
+ if self.normalize_before:
+ src = self.norm2(src)
+ src = self.linear2(self.dropout(self.activation(self.linear1(src))))
+ src = residual + self.dropout2(src)
+ if not self.normalize_before:
+ src = self.norm2(src)
+ return src
+
+
+class TransformerEncoder(nn.Layer):
+ def __init__(self, encoder_layer, num_layers, norm=None):
+ super(TransformerEncoder, self).__init__()
+ self.layers = _get_clones(encoder_layer, num_layers)
+ self.num_layers = num_layers
+ self.norm = norm
+
+ def forward(self, src, src_mask=None, pos_embed=None):
+ src_mask = _convert_attention_mask(src_mask, src.dtype)
+
+ output = src
+ for layer in self.layers:
+ output = layer(output, src_mask=src_mask, pos_embed=pos_embed)
+
+ if self.norm is not None:
+ output = self.norm(output)
+
+ return output
+
+
+class TransformerDecoderLayer(nn.Layer):
+ def __init__(self,
+ d_model,
+ nhead,
+ dim_feedforward=2048,
+ dropout=0.1,
+ activation="relu",
+ attn_dropout=None,
+ act_dropout=None,
+ normalize_before=False):
+ super(TransformerDecoderLayer, self).__init__()
+ attn_dropout = dropout if attn_dropout is None else attn_dropout
+ act_dropout = dropout if act_dropout is None else act_dropout
+ self.normalize_before = normalize_before
+
+ self.self_attn = MultiHeadAttention(d_model, nhead, attn_dropout)
+ self.cross_attn = MultiHeadAttention(d_model, nhead, attn_dropout)
+ # Implementation of Feedforward model
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
+ self.dropout = nn.Dropout(act_dropout, mode="upscale_in_train")
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+ self.norm1 = nn.LayerNorm(d_model)
+ self.norm2 = nn.LayerNorm(d_model)
+ self.norm3 = nn.LayerNorm(d_model)
+ self.dropout1 = nn.Dropout(dropout, mode="upscale_in_train")
+ self.dropout2 = nn.Dropout(dropout, mode="upscale_in_train")
+ self.dropout3 = nn.Dropout(dropout, mode="upscale_in_train")
+ self.activation = getattr(F, activation)
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ linear_init_(self.linear1)
+ linear_init_(self.linear2)
+
+ @staticmethod
+ def with_pos_embed(tensor, pos_embed):
+ return tensor if pos_embed is None else tensor + pos_embed
+
+ def forward(self,
+ tgt,
+ memory,
+ tgt_mask=None,
+ memory_mask=None,
+ pos_embed=None,
+ query_pos_embed=None):
+ tgt_mask = _convert_attention_mask(tgt_mask, tgt.dtype)
+ memory_mask = _convert_attention_mask(memory_mask, memory.dtype)
+
+ residual = tgt
+ if self.normalize_before:
+ tgt = self.norm1(tgt)
+ q = k = self.with_pos_embed(tgt, query_pos_embed)
+ tgt = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask)
+ tgt = residual + self.dropout1(tgt)
+ if not self.normalize_before:
+ tgt = self.norm1(tgt)
+
+ residual = tgt
+ if self.normalize_before:
+ tgt = self.norm2(tgt)
+ q = self.with_pos_embed(tgt, query_pos_embed)
+ k = self.with_pos_embed(memory, pos_embed)
+ tgt = self.cross_attn(q, k, value=memory, attn_mask=memory_mask)
+ tgt = residual + self.dropout2(tgt)
+ if not self.normalize_before:
+ tgt = self.norm2(tgt)
+
+ residual = tgt
+ if self.normalize_before:
+ tgt = self.norm3(tgt)
+ tgt = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
+ tgt = residual + self.dropout3(tgt)
+ if not self.normalize_before:
+ tgt = self.norm3(tgt)
+ return tgt
+
+
+class TransformerDecoder(nn.Layer):
+ def __init__(self,
+ decoder_layer,
+ num_layers,
+ norm=None,
+ return_intermediate=False):
+ super(TransformerDecoder, self).__init__()
+ self.layers = _get_clones(decoder_layer, num_layers)
+ self.num_layers = num_layers
+ self.norm = norm
+ self.return_intermediate = return_intermediate
+
+ def forward(self,
+ tgt,
+ memory,
+ tgt_mask=None,
+ memory_mask=None,
+ pos_embed=None,
+ query_pos_embed=None):
+ tgt_mask = _convert_attention_mask(tgt_mask, tgt.dtype)
+ memory_mask = _convert_attention_mask(memory_mask, memory.dtype)
+
+ output = tgt
+ intermediate = []
+ for layer in self.layers:
+ output = layer(
+ output,
+ memory,
+ tgt_mask=tgt_mask,
+ memory_mask=memory_mask,
+ pos_embed=pos_embed,
+ query_pos_embed=query_pos_embed)
+ if self.return_intermediate:
+ intermediate.append(self.norm(output))
+
+ if self.norm is not None:
+ output = self.norm(output)
+
+ if self.return_intermediate:
+ return paddle.stack(intermediate)
+
+ return output.unsqueeze(0)
+
+
+@register
+class DETRTransformer(nn.Layer):
+ __shared__ = ['hidden_dim']
+
+ def __init__(self,
+ num_queries=100,
+ position_embed_type='sine',
+ return_intermediate_dec=True,
+ backbone_num_channels=2048,
+ hidden_dim=256,
+ nhead=8,
+ num_encoder_layers=6,
+ num_decoder_layers=6,
+ dim_feedforward=2048,
+ dropout=0.1,
+ activation="relu",
+ attn_dropout=None,
+ act_dropout=None,
+ normalize_before=False):
+ super(DETRTransformer, self).__init__()
+ assert position_embed_type in ['sine', 'learned'],\
+ f'ValueError: position_embed_type not supported {position_embed_type}!'
+ self.hidden_dim = hidden_dim
+ self.nhead = nhead
+
+ encoder_layer = TransformerEncoderLayer(
+ hidden_dim, nhead, dim_feedforward, dropout, activation,
+ attn_dropout, act_dropout, normalize_before)
+ encoder_norm = nn.LayerNorm(hidden_dim) if normalize_before else None
+ self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers,
+ encoder_norm)
+
+ decoder_layer = TransformerDecoderLayer(
+ hidden_dim, nhead, dim_feedforward, dropout, activation,
+ attn_dropout, act_dropout, normalize_before)
+ decoder_norm = nn.LayerNorm(hidden_dim)
+ self.decoder = TransformerDecoder(
+ decoder_layer,
+ num_decoder_layers,
+ decoder_norm,
+ return_intermediate=return_intermediate_dec)
+
+ self.input_proj = nn.Conv2D(
+ backbone_num_channels, hidden_dim, kernel_size=1)
+ self.query_pos_embed = nn.Embedding(num_queries, hidden_dim)
+ self.position_embedding = PositionEmbedding(
+ hidden_dim // 2,
+ normalize=True if position_embed_type == 'sine' else False,
+ embed_type=position_embed_type)
+
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ for p in self.parameters():
+ if p.dim() > 1:
+ xavier_uniform_(p)
+ conv_init_(self.input_proj)
+ normal_(self.query_pos_embed.weight)
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ return {
+ 'backbone_num_channels': [i.channels for i in input_shape][-1],
+ }
+
+ def forward(self, src, src_mask=None):
+ r"""
+ Applies a Transformer model on the inputs.
+
+ Parameters:
+ src (List(Tensor)): Backbone feature maps with shape [[bs, c, h, w]].
+            src_mask (Tensor, optional): A tensor used in multi-head attention
+                to prevent attention to some unwanted positions, usually the
+                paddings or the subsequent positions. It is a tensor with shape
+                `[bs, H, W]`. When the data type is bool, the unwanted positions
+                have `False` values and the others have `True` values. When the
+                data type is int, the unwanted positions have 0 values and the
+                others have 1 values. When the data type is float, the unwanted
+                positions have `-INF` values and the others have 0 values. It
+                can be None when no position needs to be masked. Default: None.
+
+ Returns:
+            output (Tensor): [num_levels, batch_size, num_queries, hidden_dim]
+            memory (Tensor): [batch_size, hidden_dim, h, w]
+            src_proj (Tensor): [batch_size, hidden_dim, h, w], projected input feature
+            src_mask (Tensor): [batch_size, 1, 1, h, w], attention mask
+ """
+ # use last level feature map
+ src_proj = self.input_proj(src[-1])
+ bs, c, h, w = src_proj.shape
+ # flatten [B, C, H, W] to [B, HxW, C]
+ src_flatten = src_proj.flatten(2).transpose([0, 2, 1])
+ if src_mask is not None:
+ src_mask = F.interpolate(
+ src_mask.unsqueeze(0).astype(src_flatten.dtype),
+ size=(h, w))[0].astype('bool')
+ else:
+ src_mask = paddle.ones([bs, h, w], dtype='bool')
+ pos_embed = self.position_embedding(src_mask).flatten(2).transpose(
+ [0, 2, 1])
+
+ src_mask = _convert_attention_mask(src_mask, src_flatten.dtype)
+ src_mask = src_mask.reshape([bs, 1, 1, -1])
+
+ memory = self.encoder(
+ src_flatten, src_mask=src_mask, pos_embed=pos_embed)
+
+ query_pos_embed = self.query_pos_embed.weight.unsqueeze(0).tile(
+ [bs, 1, 1])
+ tgt = paddle.zeros_like(query_pos_embed)
+ output = self.decoder(
+ tgt,
+ memory,
+ memory_mask=src_mask,
+ pos_embed=pos_embed,
+ query_pos_embed=query_pos_embed)
+
+ return (output, memory.transpose([0, 2, 1]).reshape([bs, c, h, w]),
+ src_proj, src_mask.reshape([bs, 1, 1, h, w]))
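+
+
+# Illustrative usage sketch (random tensor standing in for a backbone feature
+# map; hypothetical shapes, not called anywhere in this module):
+def _detr_transformer_example():
+    model = DETRTransformer(num_queries=100, backbone_num_channels=2048)
+    feat = paddle.rand([2, 2048, 16, 16])  # [bs, c, h, w]
+    output, memory, src_proj, src_mask = model([feat])
+    # output: [num_decoder_layers, 2, 100, 256] with return_intermediate_dec
+    return output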
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/matchers.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/matchers.py
new file mode 100644
index 000000000..794d86328
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/matchers.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modified from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from scipy.optimize import linear_sum_assignment
+
+from ppdet.core.workspace import register, serializable
+from ..losses.iou_loss import GIoULoss
+from .utils import bbox_cxcywh_to_xyxy
+
+__all__ = ['HungarianMatcher']
+
+
+@register
+@serializable
+class HungarianMatcher(nn.Layer):
+ __shared__ = ['use_focal_loss']
+
+ def __init__(self,
+ matcher_coeff={'class': 1,
+ 'bbox': 5,
+ 'giou': 2},
+ use_focal_loss=False,
+ alpha=0.25,
+ gamma=2.0):
+ r"""
+ Args:
+            matcher_coeff (dict): The coefficients of the Hungarian matcher cost terms.
+ """
+ super(HungarianMatcher, self).__init__()
+ self.matcher_coeff = matcher_coeff
+ self.use_focal_loss = use_focal_loss
+ self.alpha = alpha
+ self.gamma = gamma
+
+ self.giou_loss = GIoULoss()
+
+ def forward(self, boxes, logits, gt_bbox, gt_class):
+ r"""
+ Args:
+ boxes (Tensor): [b, query, 4]
+ logits (Tensor): [b, query, num_classes]
+ gt_bbox (List(Tensor)): list[[n, 4]]
+ gt_class (List(Tensor)): list[[n, 1]]
+
+ Returns:
+ A list of size batch_size, containing tuples of (index_i, index_j) where:
+ - index_i is the indices of the selected predictions (in order)
+ - index_j is the indices of the corresponding selected targets (in order)
+ For each batch element, it holds:
+ len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
+ """
+ bs, num_queries = boxes.shape[:2]
+
+ num_gts = sum(len(a) for a in gt_class)
+ if num_gts == 0:
+ return [(paddle.to_tensor(
+ [], dtype=paddle.int64), paddle.to_tensor(
+ [], dtype=paddle.int64)) for _ in range(bs)]
+
+ # We flatten to compute the cost matrices in a batch
+ # [batch_size * num_queries, num_classes]
+ out_prob = F.sigmoid(logits.flatten(
+ 0, 1)) if self.use_focal_loss else F.softmax(logits.flatten(0, 1))
+ # [batch_size * num_queries, 4]
+ out_bbox = boxes.flatten(0, 1)
+
+ # Also concat the target labels and boxes
+ tgt_ids = paddle.concat(gt_class).flatten()
+ tgt_bbox = paddle.concat(gt_bbox)
+
+ # Compute the classification cost
+ if self.use_focal_loss:
+ neg_cost_class = (1 - self.alpha) * (out_prob**self.gamma) * (-(
+ 1 - out_prob + 1e-8).log())
+ pos_cost_class = self.alpha * (
+ (1 - out_prob)**self.gamma) * (-(out_prob + 1e-8).log())
+ cost_class = paddle.gather(
+ pos_cost_class, tgt_ids, axis=1) - paddle.gather(
+ neg_cost_class, tgt_ids, axis=1)
+ else:
+ cost_class = -paddle.gather(out_prob, tgt_ids, axis=1)
+
+ # Compute the L1 cost between boxes
+ cost_bbox = (
+ out_bbox.unsqueeze(1) - tgt_bbox.unsqueeze(0)).abs().sum(-1)
+
+        # Compute the giou cost between boxes
+ cost_giou = self.giou_loss(
+ bbox_cxcywh_to_xyxy(out_bbox.unsqueeze(1)),
+ bbox_cxcywh_to_xyxy(tgt_bbox.unsqueeze(0))).squeeze(-1)
+
+ # Final cost matrix
+ C = self.matcher_coeff['class'] * cost_class + self.matcher_coeff['bbox'] * cost_bbox + \
+ self.matcher_coeff['giou'] * cost_giou
+ C = C.reshape([bs, num_queries, -1])
+ C = [a.squeeze(0) for a in C.chunk(bs)]
+
+ sizes = [a.shape[0] for a in gt_bbox]
+ indices = [
+ linear_sum_assignment(c.split(sizes, -1)[i].numpy())
+ for i, c in enumerate(C)
+ ]
+ return [(paddle.to_tensor(
+ i, dtype=paddle.int64), paddle.to_tensor(
+ j, dtype=paddle.int64)) for i, j in indices]
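+
+
+# Illustrative usage sketch (hypothetical shapes following the docstring
+# above; not called anywhere in this module):
+def _hungarian_matcher_example():
+    matcher = HungarianMatcher()
+    boxes = paddle.rand([1, 5, 4])    # [b, query, 4], normalized cxcywh
+    logits = paddle.rand([1, 5, 3])   # [b, query, num_classes]
+    gt_bbox = [paddle.rand([2, 4])]   # one image with two gt boxes
+    gt_class = [paddle.to_tensor([[0], [2]], dtype='int64')]
+    # returns one (pred_indices, gt_indices) pair per image
+    return matcher(boxes, logits, gt_bbox, gt_class)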
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/position_encoding.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/position_encoding.py
new file mode 100644
index 000000000..e54165918
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/position_encoding.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modified from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import paddle
+import paddle.nn as nn
+
+from ppdet.core.workspace import register, serializable
+
+
+@register
+@serializable
+class PositionEmbedding(nn.Layer):
+ def __init__(self,
+ num_pos_feats=128,
+ temperature=10000,
+ normalize=True,
+ scale=None,
+ embed_type='sine',
+ num_embeddings=50,
+ offset=0.):
+ super(PositionEmbedding, self).__init__()
+ assert embed_type in ['sine', 'learned']
+
+ self.embed_type = embed_type
+ self.offset = offset
+ self.eps = 1e-6
+ if self.embed_type == 'sine':
+ self.num_pos_feats = num_pos_feats
+ self.temperature = temperature
+ self.normalize = normalize
+ if scale is not None and normalize is False:
+ raise ValueError("normalize should be True if scale is passed")
+ if scale is None:
+ scale = 2 * math.pi
+ self.scale = scale
+ elif self.embed_type == 'learned':
+ self.row_embed = nn.Embedding(num_embeddings, num_pos_feats)
+ self.col_embed = nn.Embedding(num_embeddings, num_pos_feats)
+ else:
+ raise ValueError(f"not supported {self.embed_type}")
+
+ def forward(self, mask):
+ """
+ Args:
+ mask (Tensor): [B, H, W]
+ Returns:
+ pos (Tensor): [B, C, H, W]
+ """
+ assert mask.dtype == paddle.bool
+ if self.embed_type == 'sine':
+ mask = mask.astype('float32')
+ y_embed = mask.cumsum(1, dtype='float32')
+ x_embed = mask.cumsum(2, dtype='float32')
+ if self.normalize:
+ y_embed = (y_embed + self.offset) / (
+ y_embed[:, -1:, :] + self.eps) * self.scale
+ x_embed = (x_embed + self.offset) / (
+ x_embed[:, :, -1:] + self.eps) * self.scale
+
+ dim_t = 2 * (paddle.arange(self.num_pos_feats) //
+ 2).astype('float32')
+ dim_t = self.temperature**(dim_t / self.num_pos_feats)
+
+ pos_x = x_embed.unsqueeze(-1) / dim_t
+ pos_y = y_embed.unsqueeze(-1) / dim_t
+ pos_x = paddle.stack(
+ (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()),
+ axis=4).flatten(3)
+ pos_y = paddle.stack(
+ (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()),
+ axis=4).flatten(3)
+ pos = paddle.concat((pos_y, pos_x), axis=3).transpose([0, 3, 1, 2])
+ return pos
+ elif self.embed_type == 'learned':
+ h, w = mask.shape[-2:]
+ i = paddle.arange(w)
+ j = paddle.arange(h)
+ x_emb = self.col_embed(i)
+ y_emb = self.row_embed(j)
+            # paddle has no Tensor.repeat; tile with a repeat_times list instead
+            pos = paddle.concat(
+                [
+                    x_emb.unsqueeze(0).tile([h, 1, 1]),
+                    y_emb.unsqueeze(1).tile([1, w, 1]),
+                ],
+                axis=-1).transpose([2, 0, 1]).unsqueeze(0).tile(
+                    [mask.shape[0], 1, 1, 1])
+ return pos
+ else:
+ raise ValueError(f"not supported {self.embed_type}")
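+
+
+# Illustrative usage sketch (tiny all-valid mask; not called anywhere in
+# this module):
+def _position_embedding_example():
+    pos_embed = PositionEmbedding(num_pos_feats=128, embed_type='sine')
+    mask = paddle.ones([1, 2, 4], dtype='bool')  # [B, H, W], True == valid
+    return pos_embed(mask)  # [1, 256, 2, 4]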
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/utils.py
new file mode 100644
index 000000000..414ada588
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/modeling/transformers/utils.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modified from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from ..bbox_utils import bbox_overlaps
+
+__all__ = [
+ '_get_clones', 'bbox_overlaps', 'bbox_cxcywh_to_xyxy',
+ 'bbox_xyxy_to_cxcywh', 'sigmoid_focal_loss', 'inverse_sigmoid',
+ 'deformable_attention_core_func'
+]
+
+
+def _get_clones(module, N):
+ return nn.LayerList([copy.deepcopy(module) for _ in range(N)])
+
+
+def bbox_cxcywh_to_xyxy(x):
+ x_c, y_c, w, h = x.unbind(-1)
+ b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
+ return paddle.stack(b, axis=-1)
+
+
+def bbox_xyxy_to_cxcywh(x):
+ x0, y0, x1, y1 = x.unbind(-1)
+ b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
+ return paddle.stack(b, axis=-1)
+
+
+def sigmoid_focal_loss(logit, label, normalizer=1.0, alpha=0.25, gamma=2.0):
+ prob = F.sigmoid(logit)
+ ce_loss = F.binary_cross_entropy_with_logits(logit, label, reduction="none")
+ p_t = prob * label + (1 - prob) * (1 - label)
+ loss = ce_loss * ((1 - p_t)**gamma)
+
+ if alpha >= 0:
+ alpha_t = alpha * label + (1 - alpha) * (1 - label)
+ loss = alpha_t * loss
+ return loss.mean(1).sum() / normalizer
+
+
+def inverse_sigmoid(x, eps=1e-6):
+ x = x.clip(min=0., max=1.)
+ return paddle.log(x / (1 - x + eps) + eps)
+
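+
+# Illustrative round-trip sketch for the helpers above (hypothetical box;
+# not called anywhere in this module):
+def _bbox_convert_example():
+    box = paddle.to_tensor([[0.5, 0.5, 0.2, 0.4]])  # cxcywh
+    xyxy = bbox_cxcywh_to_xyxy(box)  # [[0.4, 0.3, 0.6, 0.7]]
+    back = bbox_xyxy_to_cxcywh(xyxy)  # recovers the cxcywh box
+    # inverse_sigmoid is the (clipped) inverse of F.sigmoid:
+    assert paddle.allclose(F.sigmoid(inverse_sigmoid(box)), box, atol=1e-5)
+    return back
+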
+
+def deformable_attention_core_func(value, value_spatial_shapes,
+ sampling_locations, attention_weights):
+ """
+ Args:
+ value (Tensor): [bs, value_length, n_head, c]
+ value_spatial_shapes (Tensor): [n_levels, 2]
+ sampling_locations (Tensor): [bs, query_length, n_head, n_levels, n_points, 2]
+ attention_weights (Tensor): [bs, query_length, n_head, n_levels, n_points]
+
+ Returns:
+ output (Tensor): [bs, Length_{query}, C]
+ """
+ bs, Len_v, n_head, c = value.shape
+ _, Len_q, n_head, n_levels, n_points, _ = sampling_locations.shape
+
+ value_list = value.split(value_spatial_shapes.prod(1).tolist(), axis=1)
+ sampling_grids = 2 * sampling_locations - 1
+ sampling_value_list = []
+ for level, (h, w) in enumerate(value_spatial_shapes.tolist()):
+ # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
+ value_l_ = value_list[level].flatten(2).transpose(
+ [0, 2, 1]).reshape([bs * n_head, c, h, w])
+ # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
+ sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(
+ [0, 2, 1, 3, 4]).flatten(0, 1)
+ # N_*M_, D_, Lq_, P_
+ sampling_value_l_ = F.grid_sample(
+ value_l_,
+ sampling_grid_l_,
+ mode='bilinear',
+ padding_mode='zeros',
+ align_corners=False)
+ sampling_value_list.append(sampling_value_l_)
+ # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_*M_, 1, Lq_, L_*P_)
+ attention_weights = attention_weights.transpose([0, 2, 1, 3, 4]).reshape(
+ [bs * n_head, 1, Len_q, n_levels * n_points])
+ output = (paddle.stack(
+ sampling_value_list, axis=-2).flatten(-2) *
+ attention_weights).sum(-1).reshape([bs, n_head * c, Len_q])
+
+ return output.transpose([0, 2, 1])
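+
+
+# Illustrative shape sketch (one 4x4 level, 2 heads, 1 point per query;
+# hypothetical values, not called anywhere in this module):
+def _deformable_attention_core_example():
+    bs, n_head, c = 1, 2, 4
+    value = paddle.rand([bs, 16, n_head, c])  # 16 == 4 * 4
+    spatial_shapes = paddle.to_tensor([[4, 4]], dtype='int64')
+    sampling_locations = paddle.rand([bs, 3, n_head, 1, 1, 2])
+    attention_weights = F.softmax(paddle.rand([bs, 3, n_head, 1, 1]), -1)
+    out = deformable_attention_core_func(value, spatial_shapes,
+                                         sampling_locations, attention_weights)
+    return out  # [1, 3, 8] == [bs, query_length, n_head * c]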
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/optimizer.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/optimizer.py
new file mode 100644
index 000000000..fcdcbd8d6
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/optimizer.py
@@ -0,0 +1,333 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import paddle
+import paddle.nn as nn
+
+import paddle.optimizer as optimizer
+import paddle.regularizer as regularizer
+
+from ppdet.core.workspace import register, serializable
+
+__all__ = ['LearningRate', 'OptimizerBuilder']
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+
+@serializable
+class CosineDecay(object):
+ """
+ Cosine learning rate decay
+
+ Args:
+ max_epochs (int): max epochs for the training process.
+            If you combine cosine decay with warmup, it is recommended
+            that max_iters be much larger than the warmup iterations.
+ """
+
+ def __init__(self, max_epochs=1000, use_warmup=True):
+ self.max_epochs = max_epochs
+ self.use_warmup = use_warmup
+
+ def __call__(self,
+ base_lr=None,
+ boundary=None,
+ value=None,
+ step_per_epoch=None):
+ assert base_lr is not None, "either base LR or values should be provided"
+
+ max_iters = self.max_epochs * int(step_per_epoch)
+
+ if boundary is not None and value is not None and self.use_warmup:
+ for i in range(int(boundary[-1]), max_iters):
+ boundary.append(i)
+
+ decayed_lr = base_lr * 0.5 * (
+ math.cos(i * math.pi / max_iters) + 1)
+ value.append(decayed_lr)
+ return optimizer.lr.PiecewiseDecay(boundary, value)
+
+ return optimizer.lr.CosineAnnealingDecay(base_lr, T_max=max_iters)
+
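+
+# Illustrative usage sketch (hypothetical schedule; without warmup the class
+# falls back to plain CosineAnnealingDecay over max_epochs * step_per_epoch):
+def _cosine_decay_example():
+    scheduler = CosineDecay(max_epochs=12, use_warmup=False)
+    return scheduler(base_lr=0.01, step_per_epoch=100)  # T_max == 1200 iters
+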
+
+@serializable
+class PiecewiseDecay(object):
+ """
+ Multi step learning rate decay
+
+ Args:
+ gamma (float | list): decay factor
+ milestones (list): steps at which to decay learning rate
+ """
+
+ def __init__(self,
+ gamma=[0.1, 0.01],
+ milestones=[8, 11],
+ values=None,
+ use_warmup=True):
+ super(PiecewiseDecay, self).__init__()
+ if type(gamma) is not list:
+ self.gamma = []
+ for i in range(len(milestones)):
+ self.gamma.append(gamma / 10**i)
+ else:
+ self.gamma = gamma
+ self.milestones = milestones
+ self.values = values
+ self.use_warmup = use_warmup
+
+ def __call__(self,
+ base_lr=None,
+ boundary=None,
+ value=None,
+ step_per_epoch=None):
+ if boundary is not None and self.use_warmup:
+ boundary.extend([int(step_per_epoch) * i for i in self.milestones])
+ else:
+ # do not use LinearWarmup
+ boundary = [int(step_per_epoch) * i for i in self.milestones]
+            value = [base_lr]  # lr is base_lr during steps [0, boundary[0])
+
+        # self.values is set directly in the config
+ if self.values is not None:
+ assert len(self.milestones) + 1 == len(self.values)
+ return optimizer.lr.PiecewiseDecay(boundary, self.values)
+
+ # value is computed by self.gamma
+ value = value if value is not None else [base_lr]
+ for i in self.gamma:
+ value.append(base_lr * i)
+
+ return optimizer.lr.PiecewiseDecay(boundary, value)
+
+
+@serializable
+class LinearWarmup(object):
+ """
+ Warm up learning rate linearly
+
+ Args:
+ steps (int): warm up steps
+ start_factor (float): initial learning rate factor
+ """
+
+ def __init__(self, steps=500, start_factor=1. / 3):
+ super(LinearWarmup, self).__init__()
+ self.steps = steps
+ self.start_factor = start_factor
+
+ def __call__(self, base_lr, step_per_epoch):
+ boundary = []
+ value = []
+ for i in range(self.steps + 1):
+ if self.steps > 0:
+ alpha = i / self.steps
+ factor = self.start_factor * (1 - alpha) + alpha
+ lr = base_lr * factor
+ value.append(lr)
+ if i > 0:
+ boundary.append(i)
+ return boundary, value
+
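+
+# Small numeric sketch: with steps=2 and start_factor=1/3 the warmup factors
+# are 1/3, 2/3 and 1.0, so value == [0.1, 0.2, 0.3] and boundary == [1, 2]
+# (hypothetical numbers, not used by any config):
+def _linear_warmup_example():
+    return LinearWarmup(steps=2, start_factor=1. / 3)(
+        base_lr=0.3, step_per_epoch=100)
+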
+
+@serializable
+class BurninWarmup(object):
+ """
+ Warm up learning rate in burnin mode
+ Args:
+ steps (int): warm up steps
+ """
+
+ def __init__(self, steps=1000):
+ super(BurninWarmup, self).__init__()
+ self.steps = steps
+
+ def __call__(self, base_lr, step_per_epoch):
+ boundary = []
+ value = []
+ burnin = min(self.steps, step_per_epoch)
+ for i in range(burnin + 1):
+ factor = (i * 1.0 / burnin)**4
+ lr = base_lr * factor
+ value.append(lr)
+ if i > 0:
+ boundary.append(i)
+ return boundary, value
+
+
+@register
+class LearningRate(object):
+ """
+ Learning Rate configuration
+
+ Args:
+ base_lr (float): base learning rate
+ schedulers (list): learning rate schedulers
+ """
+ __category__ = 'optim'
+
+ def __init__(self,
+ base_lr=0.01,
+ schedulers=[PiecewiseDecay(), LinearWarmup()]):
+ super(LearningRate, self).__init__()
+ self.base_lr = base_lr
+ self.schedulers = schedulers
+
+ def __call__(self, step_per_epoch):
+ assert len(self.schedulers) >= 1
+ if not self.schedulers[0].use_warmup:
+ return self.schedulers[0](base_lr=self.base_lr,
+ step_per_epoch=step_per_epoch)
+
+ # TODO: split warmup & decay
+ # warmup
+ boundary, value = self.schedulers[1](self.base_lr, step_per_epoch)
+ # decay
+ decay_lr = self.schedulers[0](self.base_lr, boundary, value,
+ step_per_epoch)
+ return decay_lr
+
+
+@register
+class OptimizerBuilder():
+ """
+ Build optimizer handles
+ Args:
+        regularizer (object): a `Regularizer` instance
+ optimizer (object): an `Optimizer` instance
+ """
+ __category__ = 'optim'
+
+ def __init__(self,
+ clip_grad_by_norm=None,
+ regularizer={'type': 'L2',
+ 'factor': .0001},
+ optimizer={'type': 'Momentum',
+ 'momentum': .9}):
+ self.clip_grad_by_norm = clip_grad_by_norm
+ self.regularizer = regularizer
+ self.optimizer = optimizer
+
+ def __call__(self, learning_rate, model=None):
+ if self.clip_grad_by_norm is not None:
+ grad_clip = nn.ClipGradByGlobalNorm(
+ clip_norm=self.clip_grad_by_norm)
+ else:
+ grad_clip = None
+ if self.regularizer and self.regularizer != 'None':
+ reg_type = self.regularizer['type'] + 'Decay'
+ reg_factor = self.regularizer['factor']
+ regularization = getattr(regularizer, reg_type)(reg_factor)
+ else:
+ regularization = None
+
+ optim_args = self.optimizer.copy()
+ optim_type = optim_args['type']
+ del optim_args['type']
+ if optim_type != 'AdamW':
+ optim_args['weight_decay'] = regularization
+ op = getattr(optimizer, optim_type)
+
+ if 'without_weight_decay_params' in optim_args:
+ keys = optim_args['without_weight_decay_params']
+ params = [{
+ 'params': [
+ p for n, p in model.named_parameters()
+ if any([k in n for k in keys])
+ ],
+ 'weight_decay': 0.
+ }, {
+ 'params': [
+ p for n, p in model.named_parameters()
+ if all([k not in n for k in keys])
+ ]
+ }]
+ del optim_args['without_weight_decay_params']
+ else:
+ params = model.parameters()
+
+ return op(learning_rate=learning_rate,
+ parameters=params,
+ grad_clip=grad_clip,
+ **optim_args)
+
+
+class ModelEMA(object):
+ """
+    Exponential Moving Average (EMA) for deep neural networks
+    Args:
+        model (nn.Layer): Detector model.
+        decay (float): The decay used for updating the ema parameters.
+            The ema parameters are updated with the formula:
+            `ema_param = decay * ema_param + (1 - decay) * cur_param`.
+            Default is 0.9998.
+        use_thres_step (bool): Whether to set decay by a step threshold or not.
+        cycle_epoch (int): The interval (in epochs) at which to reset ema_param
+            and step. Default is -1, which means no reset. Resetting adds a
+            regularizing effect to ema; it is set empirically and is effective
+            when the total number of training epochs is large.
+ """
+
+ def __init__(self,
+ model,
+ decay=0.9998,
+ use_thres_step=False,
+ cycle_epoch=-1):
+ self.step = 0
+ self.epoch = 0
+ self.decay = decay
+ self.state_dict = dict()
+ for k, v in model.state_dict().items():
+ self.state_dict[k] = paddle.zeros_like(v)
+ self.use_thres_step = use_thres_step
+ self.cycle_epoch = cycle_epoch
+
+ def reset(self):
+ self.step = 0
+ self.epoch = 0
+ for k, v in self.state_dict.items():
+ self.state_dict[k] = paddle.zeros_like(v)
+
+ def update(self, model):
+ if self.use_thres_step:
+ decay = min(self.decay, (1 + self.step) / (10 + self.step))
+ else:
+ decay = self.decay
+ self._decay = decay
+ model_dict = model.state_dict()
+ for k, v in self.state_dict.items():
+ v = decay * v + (1 - decay) * model_dict[k]
+ v.stop_gradient = True
+ self.state_dict[k] = v
+ self.step += 1
+
+ def apply(self):
+ if self.step == 0:
+ return self.state_dict
+ state_dict = dict()
+ for k, v in self.state_dict.items():
+ v = v / (1 - self._decay**self.step)
+ v.stop_gradient = True
+ state_dict[k] = v
+ self.epoch += 1
+ if self.cycle_epoch > 0 and self.epoch == self.cycle_epoch:
+ self.reset()
+
+ return state_dict
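+
+
+# A minimal training-loop sketch (illustrative; `model`, `loader` and
+# `optim` are hypothetical stand-ins, not part of this module):
+#
+#   ema = ModelEMA(model, decay=0.9998)
+#   for data in loader:
+#       outputs = model(data)
+#       outputs['loss'].backward()
+#       optim.step()
+#       optim.clear_grad()
+#       ema.update(model)       # blend current weights into the EMA copy
+#   weights = ema.apply()       # bias-corrected EMA weights for evaluation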
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/__init__.py
new file mode 100644
index 000000000..dc22d0717
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/__init__.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import prune
+from . import quant
+from . import distill
+from . import unstructured_prune
+
+from .prune import *
+from .quant import *
+from .distill import *
+from .unstructured_prune import *
+
+import yaml
+from ppdet.core.workspace import load_config
+from ppdet.utils.checkpoint import load_pretrain_weight
+
+
+def build_slim_model(cfg, slim_cfg, mode='train'):
+ with open(slim_cfg) as f:
+ slim_load_cfg = yaml.load(f, Loader=yaml.Loader)
+ if mode != 'train' and slim_load_cfg['slim'] == 'Distill':
+ return cfg
+
+ if slim_load_cfg['slim'] == 'Distill':
+ model = DistillModel(cfg, slim_cfg)
+ cfg['model'] = model
+ elif slim_load_cfg['slim'] == 'DistillPrune':
+ if mode == 'train':
+ model = DistillModel(cfg, slim_cfg)
+ pruner = create(cfg.pruner)
+ pruner(model.student_model)
+ else:
+ model = create(cfg.architecture)
+ weights = cfg.weights
+ load_config(slim_cfg)
+ pruner = create(cfg.pruner)
+ model = pruner(model)
+ load_pretrain_weight(model, weights)
+ cfg['model'] = model
+ cfg['slim_type'] = cfg.slim
+ elif slim_load_cfg['slim'] == 'PTQ':
+ model = create(cfg.architecture)
+ load_config(slim_cfg)
+ load_pretrain_weight(model, cfg.weights)
+ slim = create(cfg.slim)
+ cfg['slim_type'] = cfg.slim
+ cfg['model'] = slim(model)
+ cfg['slim'] = slim
+ elif slim_load_cfg['slim'] == 'UnstructuredPruner':
+ load_config(slim_cfg)
+ slim = create(cfg.slim)
+ cfg['slim_type'] = cfg.slim
+ cfg['slim'] = slim
+ cfg['unstructured_prune'] = True
+ else:
+ load_config(slim_cfg)
+ model = create(cfg.architecture)
+ if mode == 'train':
+ load_pretrain_weight(model, cfg.pretrain_weights)
+ slim = create(cfg.slim)
+ cfg['slim_type'] = cfg.slim
+ # TODO: fix quant export model in framework.
+ if mode == 'test' and slim_load_cfg['slim'] == 'QAT':
+ slim.quant_config['activation_preprocess_type'] = None
+ cfg['model'] = slim(model)
+ cfg['slim'] = slim
+ if mode != 'train':
+ load_pretrain_weight(cfg['model'], cfg.weights)
+
+ return cfg
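+
+
+# For orientation, the slim_cfg consumed above minimally carries a `slim`
+# key selecting the strategy, e.g. (hypothetical distillation config):
+#
+#   slim: Distill
+#   architecture: YOLOv3
+#   distill_loss: DistillYOLOv3Loss
+#   pretrain_weights: <teacher weights path>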
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/distill.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/distill.py
new file mode 100644
index 000000000..b808553dd
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/distill.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from ppdet.core.workspace import register, create, load_config
+from ppdet.modeling import ops
+from ppdet.utils.checkpoint import load_pretrain_weight
+from ppdet.utils.logger import setup_logger
+
+logger = setup_logger(__name__)
+
+
+class DistillModel(nn.Layer):
+ def __init__(self, cfg, slim_cfg):
+ super(DistillModel, self).__init__()
+
+ self.student_model = create(cfg.architecture)
+ logger.debug('Load student model pretrain_weights:{}'.format(
+ cfg.pretrain_weights))
+ load_pretrain_weight(self.student_model, cfg.pretrain_weights)
+
+ slim_cfg = load_config(slim_cfg)
+ self.teacher_model = create(slim_cfg.architecture)
+ self.distill_loss = create(slim_cfg.distill_loss)
+ logger.debug('Load teacher model pretrain_weights:{}'.format(
+ slim_cfg.pretrain_weights))
+ load_pretrain_weight(self.teacher_model, slim_cfg.pretrain_weights)
+
+ for param in self.teacher_model.parameters():
+ param.trainable = False
+
+ def parameters(self):
+ return self.student_model.parameters()
+
+ def forward(self, inputs):
+ if self.training:
+ teacher_loss = self.teacher_model(inputs)
+ student_loss = self.student_model(inputs)
+ loss = self.distill_loss(self.teacher_model, self.student_model)
+ student_loss['distill_loss'] = loss
+ student_loss['teacher_loss'] = teacher_loss['loss']
+ student_loss['loss'] += student_loss['distill_loss']
+ return student_loss
+ else:
+ return self.student_model(inputs)
+
+
+@register
+class DistillYOLOv3Loss(nn.Layer):
+ def __init__(self, weight=1000):
+ super(DistillYOLOv3Loss, self).__init__()
+ self.weight = weight
+
+ def obj_weighted_reg(self, sx, sy, sw, sh, tx, ty, tw, th, tobj):
+ loss_x = ops.sigmoid_cross_entropy_with_logits(sx, F.sigmoid(tx))
+ loss_y = ops.sigmoid_cross_entropy_with_logits(sy, F.sigmoid(ty))
+ loss_w = paddle.abs(sw - tw)
+ loss_h = paddle.abs(sh - th)
+ loss = paddle.add_n([loss_x, loss_y, loss_w, loss_h])
+ weighted_loss = paddle.mean(loss * F.sigmoid(tobj))
+ return weighted_loss
+
+ def obj_weighted_cls(self, scls, tcls, tobj):
+ loss = ops.sigmoid_cross_entropy_with_logits(scls, F.sigmoid(tcls))
+ weighted_loss = paddle.mean(paddle.multiply(loss, F.sigmoid(tobj)))
+ return weighted_loss
+
+ def obj_loss(self, sobj, tobj):
+ obj_mask = paddle.cast(tobj > 0., dtype="float32")
+ obj_mask.stop_gradient = True
+ loss = paddle.mean(
+ ops.sigmoid_cross_entropy_with_logits(sobj, obj_mask))
+ return loss
+
+ def forward(self, teacher_model, student_model):
+ teacher_distill_pairs = teacher_model.yolo_head.loss.distill_pairs
+ student_distill_pairs = student_model.yolo_head.loss.distill_pairs
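+        # Each pair is assumed (from the indexing below) to be laid out as
+        # (x, y, w, h, objectness, cls) logits for one YOLO head level.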
+ distill_reg_loss, distill_cls_loss, distill_obj_loss = [], [], []
+ for s_pair, t_pair in zip(student_distill_pairs, teacher_distill_pairs):
+ distill_reg_loss.append(
+                self.obj_weighted_reg(s_pair[0], s_pair[1], s_pair[2],
+                                      s_pair[3], t_pair[0], t_pair[1],
+                                      t_pair[2], t_pair[3], t_pair[4]))
+ distill_cls_loss.append(
+ self.obj_weighted_cls(s_pair[5], t_pair[5], t_pair[4]))
+ distill_obj_loss.append(self.obj_loss(s_pair[4], t_pair[4]))
+ distill_reg_loss = paddle.add_n(distill_reg_loss)
+ distill_cls_loss = paddle.add_n(distill_cls_loss)
+ distill_obj_loss = paddle.add_n(distill_obj_loss)
+ loss = (distill_reg_loss + distill_cls_loss + distill_obj_loss
+ ) * self.weight
+ return loss
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/prune.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/prune.py
new file mode 100644
index 000000000..70d3de369
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/prune.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+from paddle.utils import try_import
+
+from ppdet.core.workspace import register, serializable
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+
+def print_prune_params(model):
+ model_dict = model.state_dict()
+ for key in model_dict.keys():
+ weight_name = model_dict[key].name
+ logger.info('Parameter name: {}, shape: {}'.format(
+ weight_name, model_dict[key].shape))
+
+
+@register
+@serializable
+class Pruner(object):
+ def __init__(self,
+ criterion,
+ pruned_params,
+ pruned_ratios,
+ print_params=False):
+ super(Pruner, self).__init__()
+ assert criterion in ['l1_norm', 'fpgm'], \
+ "unsupported prune criterion: {}".format(criterion)
+ self.criterion = criterion
+ self.pruned_params = pruned_params
+ self.pruned_ratios = pruned_ratios
+ self.print_params = print_params
+
+ def __call__(self, model):
+        # FIXME: adapt to the network graph when training and inference are
+        # inconsistent; currently only pruning the inference graph is supported.
+ model.eval()
+ paddleslim = try_import('paddleslim')
+ from paddleslim.analysis import dygraph_flops as flops
+ input_spec = [{
+ "image": paddle.ones(
+ shape=[1, 3, 640, 640], dtype='float32'),
+ "im_shape": paddle.full(
+ [1, 2], 640, dtype='float32'),
+ "scale_factor": paddle.ones(
+ shape=[1, 2], dtype='float32')
+ }]
+ if self.print_params:
+ print_prune_params(model)
+
+ ori_flops = flops(model, input_spec) / (1000**3)
+ logger.info("FLOPs before pruning: {}GFLOPs".format(ori_flops))
+ if self.criterion == 'fpgm':
+ pruner = paddleslim.dygraph.FPGMFilterPruner(model, input_spec)
+ elif self.criterion == 'l1_norm':
+ pruner = paddleslim.dygraph.L1NormFilterPruner(model, input_spec)
+
+ logger.info("pruned params: {}".format(self.pruned_params))
+ pruned_ratios = [float(n) for n in self.pruned_ratios]
+ ratios = {}
+ for i, param in enumerate(self.pruned_params):
+ ratios[param] = pruned_ratios[i]
+ pruner.prune_vars(ratios, [0])
+ pruned_flops = flops(model, input_spec) / (1000**3)
+ logger.info("FLOPs after pruning: {}GFLOPs; pruned ratio: {}".format(
+ pruned_flops, (ori_flops - pruned_flops) / ori_flops))
+
+ return model
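+
+
+# Usage sketch (hypothetical parameter names and ratios; real names come
+# from the model's state dict, see print_prune_params):
+#
+#   pruner = Pruner(criterion='fpgm',
+#                   pruned_params=['conv2d_1.w_0', 'conv2d_2.w_0'],
+#                   pruned_ratios=[0.3, 0.3])
+#   model = pruner(model)  # prunes in place and logs the FLOPs delta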
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/quant.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/quant.py
new file mode 100644
index 000000000..ab81127ae
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/quant.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from paddle.utils import try_import
+
+from ppdet.core.workspace import register, serializable
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+
+@register
+@serializable
+class QAT(object):
+ def __init__(self, quant_config, print_model):
+ super(QAT, self).__init__()
+ self.quant_config = quant_config
+ self.print_model = print_model
+
+ def __call__(self, model):
+ paddleslim = try_import('paddleslim')
+ self.quanter = paddleslim.dygraph.quant.QAT(config=self.quant_config)
+ if self.print_model:
+ logger.info("Model before quant:")
+ logger.info(model)
+
+ self.quanter.quantize(model)
+
+ if self.print_model:
+ logger.info("Quantized model:")
+ logger.info(model)
+
+ return model
+
+ def save_quantized_model(self, layer, path, input_spec=None, **config):
+ self.quanter.save_quantized_model(
+ model=layer, path=path, input_spec=input_spec, **config)
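+
+    # For orientation, a quant_config in the shape PaddleSlim's dygraph QAT
+    # accepts might look like the following (hypothetical values; see the
+    # paddleslim docs for the full schema):
+    #
+    #   {'weight_quantize_type': 'channel_wise_abs_max',
+    #    'activation_quantize_type': 'moving_average_abs_max',
+    #    'quantizable_layer_type': ['Conv2D', 'Linear']}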
+
+
+@register
+@serializable
+class PTQ(object):
+ def __init__(self,
+ ptq_config,
+ quant_batch_num=10,
+ output_dir='output_inference',
+ fuse=True,
+ fuse_list=None):
+ super(PTQ, self).__init__()
+ self.ptq_config = ptq_config
+ self.quant_batch_num = quant_batch_num
+ self.output_dir = output_dir
+ self.fuse = fuse
+ self.fuse_list = fuse_list
+
+ def __call__(self, model):
+ paddleslim = try_import('paddleslim')
+ self.ptq = paddleslim.PTQ(**self.ptq_config)
+ model.eval()
+ quant_model = self.ptq.quantize(
+ model, fuse=self.fuse, fuse_list=self.fuse_list)
+
+ return quant_model
+
+ def save_quantized_model(self,
+ quant_model,
+ quantize_model_path,
+ input_spec=None):
+ self.ptq.save_quantized_model(quant_model, quantize_model_path,
+ input_spec)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/unstructured_prune.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/unstructured_prune.py
new file mode 100644
index 000000000..1dc876a8c
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/slim/unstructured_prune.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from paddle.utils import try_import
+
+from ppdet.core.workspace import register, serializable
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+
+@register
+@serializable
+class UnstructuredPruner(object):
+ def __init__(self,
+ stable_epochs,
+ pruning_epochs,
+ tunning_epochs,
+ pruning_steps,
+ ratio,
+ initial_ratio,
+ prune_params_type=None):
+ self.stable_epochs = stable_epochs
+ self.pruning_epochs = pruning_epochs
+ self.tunning_epochs = tunning_epochs
+ self.ratio = ratio
+ self.prune_params_type = prune_params_type
+ self.initial_ratio = initial_ratio
+ self.pruning_steps = pruning_steps
+
+ def __call__(self, model, steps_per_epoch, skip_params_func=None):
+ paddleslim = try_import('paddleslim')
+ from paddleslim import GMPUnstructuredPruner
+ configs = {
+ 'pruning_strategy': 'gmp',
+ 'stable_iterations': self.stable_epochs * steps_per_epoch,
+ 'pruning_iterations': self.pruning_epochs * steps_per_epoch,
+ 'tunning_iterations': self.tunning_epochs * steps_per_epoch,
+ 'resume_iteration': 0,
+ 'pruning_steps': self.pruning_steps,
+ 'initial_ratio': self.initial_ratio,
+ }
+
+ pruner = GMPUnstructuredPruner(
+ model,
+ ratio=self.ratio,
+ skip_params_func=skip_params_func,
+ prune_params_type=self.prune_params_type,
+ local_sparsity=True,
+ configs=configs)
+
+ return pruner
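+
+
+# Worked example of the schedule arithmetic above (hypothetical numbers):
+# with stable_epochs=0, pruning_epochs=54, tunning_epochs=40 and
+# steps_per_epoch=500, GMP runs 0 stable iterations, then raises sparsity
+# from initial_ratio to ratio across 27000 pruning iterations (split into
+# `pruning_steps` discrete jumps), and finally fine-tunes for 20000
+# iterations at the target ratio.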
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/__init__.py
new file mode 100644
index 000000000..d0c32e260
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/check.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/check.py
new file mode 100644
index 000000000..6c795b532
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/check.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import sys
+
+import paddle
+import six
+import paddle.version as fluid_version
+
+from .logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = ['check_gpu', 'check_npu', 'check_version', 'check_config']
+
+
+def check_npu(use_npu):
+ """
+    Log an error and exit when use_npu is set to true but the installed
+    PaddlePaddle is a CPU/GPU/XPU build.
+ """
+ err = "Config use_npu cannot be set as true while you are " \
+ "using paddlepaddle cpu/gpu/xpu version ! \nPlease try: \n" \
+ "\t1. Install paddlepaddle-npu to run model on NPU \n" \
+ "\t2. Set use_npu as false in config file to run " \
+ "model on CPU/GPU/XPU"
+
+ try:
+ if use_npu and not paddle.is_compiled_with_npu():
+ logger.error(err)
+ sys.exit(1)
+ except Exception as e:
+ pass
+
+
+def check_gpu(use_gpu):
+ """
+    Log an error and exit when use_gpu is set to true but the installed
+    PaddlePaddle is a CPU-only build.
+ """
+ err = "Config use_gpu cannot be set as true while you are " \
+ "using paddlepaddle cpu version ! \nPlease try: \n" \
+ "\t1. Install paddlepaddle-gpu to run model on GPU \n" \
+ "\t2. Set use_gpu as false in config file to run " \
+ "model on CPU"
+
+ try:
+ if use_gpu and not paddle.is_compiled_with_cuda():
+ logger.error(err)
+ sys.exit(1)
+ except Exception as e:
+ pass
+
+
+def check_version(version='2.0'):
+ """
+    Log an error and exit when the installed PaddlePaddle version does
+    not satisfy the requirement.
+ """
+ err = "PaddlePaddle version {} or higher is required, " \
+ "or a suitable develop version is satisfied as well. \n" \
+ "Please make sure the version is good with your code.".format(version)
+
+ version_installed = [
+ fluid_version.major, fluid_version.minor, fluid_version.patch,
+ fluid_version.rc
+ ]
+ if version_installed == ['0', '0', '0', '0']:
+ return
+ version_split = version.split('.')
+
+ length = min(len(version_installed), len(version_split))
+    for i in six.moves.range(length):
+        # compare components numerically so that e.g. '10' ranks above '9'
+        installed = int(version_installed[i])
+        required = int(version_split[i])
+        if installed > required:
+            return
+        if installed < required:
+            raise Exception(err)
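+    # Worked example: with paddle 2.1.2 installed, version_installed is
+    # ['2', '1', '2', '0']; against the default requirement '2.0' the first
+    # components tie (2 == 2) and the second (1 > 0) returns successfully.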
+
+
+def check_config(cfg):
+ """
+    Check the correctness of the configuration file. Log an error and
+    exit when the config is not compliant.
+ """
+ err = "'{}' not specified in config file. Please set it in config file."
+ check_list = ['architecture', 'num_classes']
+ try:
+ for var in check_list:
+            if var not in cfg:
+ logger.error(err.format(var))
+ sys.exit(1)
+ except Exception as e:
+ pass
+
+ if 'log_iter' not in cfg:
+ cfg.log_iter = 20
+
+ return cfg
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/checkpoint.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/checkpoint.py
new file mode 100644
index 000000000..b5aa84697
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/checkpoint.py
@@ -0,0 +1,226 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import errno
+import os
+import time
+import numpy as np
+import paddle
+import paddle.nn as nn
+from .download import get_weights_path
+
+from .logger import setup_logger
+logger = setup_logger(__name__)
+
+
+def is_url(path):
+ """
+    Check whether a path is a URL.
+    Args:
+        path (str): the path string to check.
+ """
+ return path.startswith('http://') \
+ or path.startswith('https://') \
+ or path.startswith('ppdet://')
+
+
+def _get_unique_endpoints(trainer_endpoints):
+    # Sort so that every card resolves the same unique endpoints,
+    # regardless of the order of its endpoint environment variables
+ trainer_endpoints.sort()
+ ips = set()
+ unique_endpoints = set()
+ for endpoint in trainer_endpoints:
+ ip = endpoint.split(":")[0]
+ if ip in ips:
+ continue
+ ips.add(ip)
+ unique_endpoints.add(endpoint)
+ logger.info("unique_endpoints {}".format(unique_endpoints))
+ return unique_endpoints
+
+
+def _strip_postfix(path):
+ path, ext = os.path.splitext(path)
+ assert ext in ['', '.pdparams', '.pdopt', '.pdmodel'], \
+ "Unknown postfix {} from weights".format(ext)
+ return path
+
+
+def load_weight(model, weight, optimizer=None):
+ if is_url(weight):
+ weight = get_weights_path(weight)
+
+ path = _strip_postfix(weight)
+ pdparam_path = path + '.pdparams'
+ if not os.path.exists(pdparam_path):
+ raise ValueError("Model pretrain path {} does not "
+ "exists.".format(pdparam_path))
+
+ param_state_dict = paddle.load(pdparam_path)
+ model_dict = model.state_dict()
+ model_weight = {}
+ incorrect_keys = 0
+
+ for key in model_dict.keys():
+ if key in param_state_dict.keys():
+ model_weight[key] = param_state_dict[key]
+ else:
+ logger.info('Unmatched key: {}'.format(key))
+ incorrect_keys += 1
+
+    assert incorrect_keys == 0, "Failed to load weight {}: " \
+        "{} keys unmatched, please check again.".format(weight, incorrect_keys)
+ logger.info('Finish resuming model weights: {}'.format(pdparam_path))
+
+ model.set_dict(model_weight)
+
+ last_epoch = 0
+ if optimizer is not None and os.path.exists(path + '.pdopt'):
+ optim_state_dict = paddle.load(path + '.pdopt')
+        # workaround for a resume bug; may be fixed in paddle 2.0
+ for key in optimizer.state_dict().keys():
+            if key not in optim_state_dict:
+ optim_state_dict[key] = optimizer.state_dict()[key]
+ if 'last_epoch' in optim_state_dict:
+ last_epoch = optim_state_dict.pop('last_epoch')
+ optimizer.set_state_dict(optim_state_dict)
+
+ return last_epoch
+
+
+def match_state_dict(model_state_dict, weight_state_dict):
+ """
+ Match between the model state dict and pretrained weight state dict.
+ Return the matched state dict.
+
+    The method assumes that, once the prefix 'backbone.' is stripped from the
+    pretrained weight keys, every pretrained name is a suffix of some name in
+    the model state dict. This yields candidates for each model key, and the
+    candidate with the longest match is selected as the final result. For
+    example, if the model state dict contains
+    'backbone.res2.res2a.branch2a.conv.weight' and the pretrained weights
+    contain both 'res2.res2a.branch2a.conv.weight' and 'branch2a.conv.weight',
+    we match 'res2.res2a.branch2a.conv.weight' to the model key.
+ """
+
+ model_keys = sorted(model_state_dict.keys())
+ weight_keys = sorted(weight_state_dict.keys())
+
+ def match(a, b):
+ if a.startswith('backbone.res5'):
+            # In Faster RCNN, res5 pretrained weights carry the prefix
+            # 'backbone.', while the corresponding model weights carry a
+            # different prefix, 'bbox_head.'.
+ b = b[9:]
+ return a == b or a.endswith("." + b)
+
+ match_matrix = np.zeros([len(model_keys), len(weight_keys)])
+ for i, m_k in enumerate(model_keys):
+ for j, w_k in enumerate(weight_keys):
+ if match(m_k, w_k):
+ match_matrix[i, j] = len(w_k)
+ max_id = match_matrix.argmax(1)
+ max_len = match_matrix.max(1)
+ max_id[max_len == 0] = -1
+    # report pretrained weight keys that matched no model key
+    not_load_weight_name = []
+    matched_weight_ids = set(int(i) for i in max_id if i >= 0)
+    for weight_idx, weight_key in enumerate(weight_keys):
+        if weight_idx not in matched_weight_ids:
+            not_load_weight_name.append(weight_key)
+    if len(not_load_weight_name) > 0:
+        logger.info('{} in pretrained weight is not used in the model, '
+                    'and it will not be loaded'.format(not_load_weight_name))
+ matched_keys = {}
+ result_state_dict = {}
+ for model_id, weight_id in enumerate(max_id):
+ if weight_id == -1:
+ continue
+ model_key = model_keys[model_id]
+ weight_key = weight_keys[weight_id]
+ weight_value = weight_state_dict[weight_key]
+ model_value_shape = list(model_state_dict[model_key].shape)
+
+ if list(weight_value.shape) != model_value_shape:
+ logger.info(
+ 'The shape {} in pretrained weight {} is unmatched with '
+ 'the shape {} in model {}. And the weight {} will not be '
+ 'loaded'.format(weight_value.shape, weight_key,
+ model_value_shape, model_key, weight_key))
+ continue
+
+ assert model_key not in result_state_dict
+ result_state_dict[model_key] = weight_value
+ if weight_key in matched_keys:
+            raise ValueError('Ambiguous weight {}: it matches at least '
+                             '{} and {} in the model'.format(
+                                 weight_key, model_key,
+                                 matched_keys[weight_key]))
+ matched_keys[weight_key] = model_key
+ return result_state_dict
+
+
+def load_pretrain_weight(model, pretrain_weight):
+ if is_url(pretrain_weight):
+ pretrain_weight = get_weights_path(pretrain_weight)
+
+ path = _strip_postfix(pretrain_weight)
+ if not (os.path.isdir(path) or os.path.isfile(path) or
+ os.path.exists(path + '.pdparams')):
+ raise ValueError("Model pretrain path `{}` does not exists. "
+ "If you don't want to load pretrain model, "
+ "please delete `pretrain_weights` field in "
+ "config file.".format(path))
+
+ model_dict = model.state_dict()
+
+ weights_path = path + '.pdparams'
+ param_state_dict = paddle.load(weights_path)
+ param_state_dict = match_state_dict(model_dict, param_state_dict)
+
+ model.set_dict(param_state_dict)
+ logger.info('Finish loading model weights: {}'.format(weights_path))
+
+
+def save_model(model, optimizer, save_dir, save_name, last_epoch):
+ """
+    Save model to disk.
+
+ Args:
+        model (paddle.nn.Layer): the Layer instance whose parameters are saved.
+ optimizer (paddle.optimizer.Optimizer): the Optimizer instance to
+ save optimizer states.
+ save_dir (str): the directory to be saved.
+ save_name (str): the path to be saved.
+ last_epoch (int): the epoch index.
+ """
+ if paddle.distributed.get_rank() != 0:
+ return
+ if not os.path.exists(save_dir):
+ os.makedirs(save_dir)
+ save_path = os.path.join(save_dir, save_name)
+ if isinstance(model, nn.Layer):
+ paddle.save(model.state_dict(), save_path + ".pdparams")
+ else:
+        assert isinstance(
+            model, dict), 'model is not an instance of nn.Layer or dict'
+ paddle.save(model, save_path + ".pdparams")
+ state_dict = optimizer.state_dict()
+ state_dict['last_epoch'] = last_epoch
+ paddle.save(state_dict, save_path + ".pdopt")
+ logger.info("Save checkpoint: {}".format(save_dir))
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/cli.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/cli.py
new file mode 100644
index 000000000..b8ba59d78
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/cli.py
@@ -0,0 +1,151 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
+
+import yaml
+import re
+from ppdet.core.workspace import get_registered_modules, dump_value
+
+__all__ = ['ColorTTY', 'ArgsParser']
+
+
+class ColorTTY(object):
+ def __init__(self):
+ super(ColorTTY, self).__init__()
+ self.colors = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan']
+
+ def __getattr__(self, attr):
+ if attr in self.colors:
+ color = self.colors.index(attr) + 31
+
+ def color_message(message):
+ return "[{}m{}[0m".format(color, message)
+
+ setattr(self, attr, color_message)
+ return color_message
+
+ def bold(self, message):
+ return self.with_code('01', message)
+
+ def with_code(self, code, message):
+ return "[{}m{}[0m".format(code, message)
+
+
+class ArgsParser(ArgumentParser):
+ def __init__(self):
+ super(ArgsParser, self).__init__(
+ formatter_class=RawDescriptionHelpFormatter)
+ self.add_argument("-c", "--config", help="configuration file to use")
+ self.add_argument(
+ "-o", "--opt", nargs='*', help="set configuration options")
+
+ def parse_args(self, argv=None):
+ args = super(ArgsParser, self).parse_args(argv)
+ assert args.config is not None, \
+ "Please specify --config=configure_file_path."
+ args.opt = self._parse_opt(args.opt)
+ return args
+
+ def _parse_opt(self, opts):
+ config = {}
+ if not opts:
+ return config
+ for s in opts:
+ s = s.strip()
+ k, v = s.split('=', 1)
+ if '.' not in k:
+ config[k] = yaml.load(v, Loader=yaml.Loader)
+ else:
+ keys = k.split('.')
+ if keys[0] not in config:
+ config[keys[0]] = {}
+ cur = config[keys[0]]
+ for idx, key in enumerate(keys[1:]):
+ if idx == len(keys) - 2:
+ cur[key] = yaml.load(v, Loader=yaml.Loader)
+ else:
+ cur[key] = {}
+ cur = cur[key]
+ return config
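+
+    # For illustration: parsing the (hypothetical) options
+    #   -o use_gpu=true TrainReader.batch_size=2
+    # yields {'use_gpu': True, 'TrainReader': {'batch_size': 2}}.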
+
+
+def print_total_cfg(config):
+ modules = get_registered_modules()
+ color_tty = ColorTTY()
+ green = '___{}___'.format(color_tty.colors.index('green') + 31)
+
+ styled = {}
+ for key in config.keys():
+ if not config[key]: # empty schema
+ continue
+
+ if key not in modules and not hasattr(config[key], '__dict__'):
+ styled[key] = config[key]
+ continue
+ elif key in modules:
+ module = modules[key]
+ else:
+ type_name = type(config[key]).__name__
+ if type_name in modules:
+ module = modules[type_name].copy()
+ module.update({
+ k: v
+ for k, v in config[key].__dict__.items()
+ if k in module.schema
+ })
+ key += " ({})".format(type_name)
+ default = module.find_default_keys()
+ missing = module.find_missing_keys()
+ mismatch = module.find_mismatch_keys()
+ extra = module.find_extra_keys()
+ dep_missing = []
+ for dep in module.inject:
+ if isinstance(module[dep], str) and module[dep] != '':
+ if module[dep] not in modules: # not a valid module
+ dep_missing.append(dep)
+ else:
+ dep_mod = modules[module[dep]]
+ # empty dict but mandatory
+ if not dep_mod and dep_mod.mandatory():
+ dep_missing.append(dep)
+ override = list(
+ set(module.keys()) - set(default) - set(extra) - set(dep_missing))
+ replacement = {}
+ for name in set(override + default + extra + mismatch + missing):
+ new_name = name
+            if name in missing:
+                value = "<missing>"
+            else:
+                value = module[name]
+
+            if name in extra:
+                value = dump_value(value) + " <extraneous>"
+            elif name in mismatch:
+                value = dump_value(value) + " <type mismatch>"
+            elif name in dep_missing:
+                value = dump_value(value) + " <module config missing>"
+ elif name in override and value != '':
+ mark = green
+ new_name = mark + name
+ replacement[new_name] = value
+ styled[key] = replacement
+ buffer = yaml.dump(styled, default_flow_style=False, default_style='')
+ buffer = (re.sub(r"", r"[31m[0m", buffer))
+ buffer = (re.sub(r"", r"[33m[0m", buffer))
+ buffer = (re.sub(r"", r"[31m[0m", buffer))
+ buffer = (re.sub(r"",
+ r"[31m[0m", buffer))
+ buffer = re.sub(r"___(\d+)___(.*?):", r"[\1m\2[0m:", buffer)
+ print(buffer)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/colormap.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/colormap.py
new file mode 100644
index 000000000..a9cdbe891
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/colormap.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import numpy as np
+
+
+def colormap(rgb=False):
+ """
+ Get colormap
+
+ The code of this function is copied from https://github.com/facebookresearch/Detectron/blob/main/detectron/utils/colormap.py
+ """
+ color_list = np.array([
+ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,
+ 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078,
+ 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,
+ 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000,
+ 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667,
+ 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,
+ 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000,
+ 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,
+ 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,
+ 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667,
+ 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,
+ 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,
+ 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333,
+ 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000,
+ 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,
+ 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167,
+ 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000,
+ 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,
+ 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,
+ 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000,
+ 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,
+ 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286,
+ 0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714,
+ 0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000
+ ]).astype(np.float32)
+ color_list = color_list.reshape((-1, 3)) * 255
+ if not rgb:
+ color_list = color_list[:, ::-1]
+ return color_list
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/download.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/download.py
new file mode 100644
index 000000000..54c19c6cf
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/download.py
@@ -0,0 +1,557 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import os.path as osp
+import sys
+import yaml
+import time
+import shutil
+import requests
+import tqdm
+import hashlib
+import base64
+import binascii
+import tarfile
+import zipfile
+
+from paddle.utils.download import _get_unique_endpoints
+from ppdet.core.workspace import BASE_KEY
+from .logger import setup_logger
+from .voc_utils import create_list
+
+logger = setup_logger(__name__)
+
+__all__ = [
+ 'get_weights_path', 'get_dataset_path', 'get_config_path',
+ 'download_dataset', 'create_voc_list'
+]
+
+WEIGHTS_HOME = osp.expanduser("~/.cache/paddle/weights")
+DATASET_HOME = osp.expanduser("~/.cache/paddle/dataset")
+CONFIGS_HOME = osp.expanduser("~/.cache/paddle/configs")
+
+# dict of {dataset_name: (download_info, sub_dirs)}
+# download info: [(url, md5sum)]
+DATASETS = {
+ 'coco': ([
+ (
+ 'http://images.cocodataset.org/zips/train2017.zip',
+ 'cced6f7f71b7629ddf16f17bbcfab6b2', ),
+ (
+ 'http://images.cocodataset.org/zips/val2017.zip',
+ '442b8da7639aecaf257c1dceb8ba8c80', ),
+ (
+ 'http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
+ 'f4bbac642086de4f52a3fdda2de5fa2c', ),
+ ], ["annotations", "train2017", "val2017"]),
+ 'voc': ([
+ (
+ 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
+ '6cd6e144f989b92b3379bac3b3de84fd', ),
+ (
+ 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
+ 'c52e279531787c972589f7e41ab4ae64', ),
+ (
+ 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
+ 'b6e924de25625d8de591ea690078ad9f', ),
+ (
+ 'https://paddledet.bj.bcebos.com/data/label_list.txt',
+ '5ae5d62183cfb6f6d3ac109359d06a1b', ),
+ ], ["VOCdevkit/VOC2012", "VOCdevkit/VOC2007"]),
+ 'wider_face': ([
+ (
+ 'https://dataset.bj.bcebos.com/wider_face/WIDER_train.zip',
+ '3fedf70df600953d25982bcd13d91ba2', ),
+ (
+ 'https://dataset.bj.bcebos.com/wider_face/WIDER_val.zip',
+ 'dfa7d7e790efa35df3788964cf0bbaea', ),
+ (
+ 'https://dataset.bj.bcebos.com/wider_face/wider_face_split.zip',
+ 'a4a898d6193db4b9ef3260a68bad0dc7', ),
+ ], ["WIDER_train", "WIDER_val", "wider_face_split"]),
+ 'fruit': ([(
+ 'https://dataset.bj.bcebos.com/PaddleDetection_demo/fruit.tar',
+ 'baa8806617a54ccf3685fa7153388ae6', ), ],
+ ['Annotations', 'JPEGImages']),
+ 'roadsign_voc': ([(
+ 'https://paddlemodels.bj.bcebos.com/object_detection/roadsign_voc.tar',
+ '8d629c0f880dd8b48de9aeff44bf1f3e', ), ], ['annotations', 'images']),
+ 'roadsign_coco': ([(
+ 'https://paddlemodels.bj.bcebos.com/object_detection/roadsign_coco.tar',
+ '49ce5a9b5ad0d6266163cd01de4b018e', ), ], ['annotations', 'images']),
+ 'spine_coco': ([(
+ 'https://paddledet.bj.bcebos.com/data/spine_coco.tar',
+ '7ed69ae73f842cd2a8cf4f58dc3c5535', ), ], ['annotations', 'images']),
+ 'mot': (),
+ 'objects365': (),
+ 'coco_ce': ([(
+ 'https://paddledet.bj.bcebos.com/data/coco_ce.tar',
+ 'eadd1b79bc2f069f2744b1dd4e0c0329', ), ], [])
+}
+
+DOWNLOAD_RETRY_LIMIT = 3
+
+PPDET_WEIGHTS_DOWNLOAD_URL_PREFIX = 'https://paddledet.bj.bcebos.com/'
+
+
+def parse_url(url):
+ url = url.replace("ppdet://", PPDET_WEIGHTS_DOWNLOAD_URL_PREFIX)
+ return url
+
+
+def get_weights_path(url):
+ """Get weights path from WEIGHTS_HOME, if not exists,
+ download it from url.
+ """
+ url = parse_url(url)
+ path, _ = get_path(url, WEIGHTS_HOME)
+ return path
+
+
+def get_config_path(url):
+ """Get weights path from CONFIGS_HOME, if not exists,
+ download it from url.
+ """
+ url = parse_url(url)
+ path = map_path(url, CONFIGS_HOME, path_depth=2)
+ if os.path.isfile(path):
+ return path
+
+ # config file not found, try download
+ # 1. clear configs directory
+ if osp.isdir(CONFIGS_HOME):
+ shutil.rmtree(CONFIGS_HOME)
+
+ # 2. get url
+ try:
+ from ppdet import __version__ as version
+ except ImportError:
+ version = None
+
+ cfg_url = "ppdet://configs/{}/configs.tar".format(version) \
+ if version else "ppdet://configs/configs.tar"
+ cfg_url = parse_url(cfg_url)
+
+ # 3. download and decompress
+ cfg_fullname = _download_dist(cfg_url, osp.dirname(CONFIGS_HOME))
+ _decompress_dist(cfg_fullname)
+
+ # 4. check config file existing
+ if os.path.isfile(path):
+ return path
+ else:
+ logger.error("Get config {} failed after download, please contact us on " \
+ "https://github.com/PaddlePaddle/PaddleDetection/issues".format(path))
+ sys.exit(1)
+
+
+def get_dataset_path(path, annotation, image_dir):
+ """
+ If path exists, return path.
+ Otherwise, get dataset path from DATASET_HOME, if not exists,
+ download it.
+ """
+ if _dataset_exists(path, annotation, image_dir):
+ return path
+
+ logger.info("Dataset {} is not valid for reason above, try searching {} or "
+ "downloading dataset...".format(
+ osp.realpath(path), DATASET_HOME))
+
+ data_name = os.path.split(path.strip().lower())[-1]
+ for name, dataset in DATASETS.items():
+ if data_name == name:
+ logger.debug("Parse dataset_dir {} as dataset "
+ "{}".format(path, name))
+ if name == 'objects365':
+ raise NotImplementedError(
+ "Dataset {} is not valid for download automatically. "
+ "Please apply and download the dataset from "
+ "https://www.objects365.org/download.html".format(name))
+ data_dir = osp.join(DATASET_HOME, name)
+
+ if name == 'mot':
+ if osp.exists(path) or osp.exists(data_dir):
+ return data_dir
+ else:
+ raise NotImplementedError(
+ "Dataset {} is not valid for download automatically. "
+ "Please apply and download the dataset following docs/tutorials/PrepareMOTDataSet.md".
+ format(name))
+
+ if name == "spine_coco":
+ if _dataset_exists(data_dir, annotation, image_dir):
+ return data_dir
+
+ # For voc, only check dir VOCdevkit/VOC2012, VOCdevkit/VOC2007
+ if name in ['voc', 'fruit', 'roadsign_voc']:
+ exists = True
+ for sub_dir in dataset[1]:
+ check_dir = osp.join(data_dir, sub_dir)
+ if osp.exists(check_dir):
+ logger.info("Found {}".format(check_dir))
+ else:
+ exists = False
+ if exists:
+ return data_dir
+
+ # voc exist is checked above, voc is not exist here
+ check_exist = name != 'voc' and name != 'fruit' and name != 'roadsign_voc'
+ for url, md5sum in dataset[0]:
+ get_path(url, data_dir, md5sum, check_exist)
+
+ # voc should create list after download
+ if name == 'voc':
+ create_voc_list(data_dir)
+ return data_dir
+
+ # not match any dataset in DATASETS
+ raise ValueError(
+ "Dataset {} is not valid and cannot parse dataset type "
+ "'{}' for automaticly downloading, which only supports "
+ "'voc' , 'coco', 'wider_face', 'fruit', 'roadsign_voc' and 'mot' currently".
+ format(path, osp.split(path)[-1]))
+
+
+def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
+ logger.debug("Create voc file list...")
+ devkit_dir = osp.join(data_dir, devkit_subdir)
+ years = ['2007', '2012']
+
+ # NOTE: since using auto download VOC
+ # dataset, VOC default label list should be used,
+ # do not generate label_list.txt here. For default
+ # label, see ../data/source/voc.py
+ create_list(devkit_dir, years, data_dir)
+ logger.debug("Create voc file list finished")
+
+
+def map_path(url, root_dir, path_depth=1):
+ # parse path after download to decompress under root_dir
+ assert path_depth > 0, "path_depth should be a positive integer"
+ dirname = url
+ for _ in range(path_depth):
+ dirname = osp.dirname(dirname)
+ fpath = osp.relpath(url, dirname)
+
+ zip_formats = ['.zip', '.tar', '.gz']
+ for zip_format in zip_formats:
+ fpath = fpath.replace(zip_format, '')
+ return osp.join(root_dir, fpath)
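+
+
+# For illustration (hypothetical URL): with the default path_depth=1,
+#   map_path('https://host/models/yolov3.pdparams', WEIGHTS_HOME)
+# strips the dirname 'https://host/models' and returns
+# osp.join(WEIGHTS_HOME, 'yolov3.pdparams').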
+
+
+def get_path(url, root_dir, md5sum=None, check_exist=True):
+ """ Download from given url to root_dir.
+ if file or directory specified by url is exists under
+ root_dir, return the path directly, otherwise download
+ from url and decompress it, return the path.
+
+ url (str): download url
+ root_dir (str): root dir for downloading, it should be
+ WEIGHTS_HOME or DATASET_HOME
+ md5sum (str): md5 sum of download package
+ """
+ # parse path after download to decompress under root_dir
+ fullpath = map_path(url, root_dir)
+
+    # For some zip files the decompressed directory name differs
+    # from the zip file name; rename using the following map
+ decompress_name_map = {
+ "VOCtrainval_11-May-2012": "VOCdevkit/VOC2012",
+ "VOCtrainval_06-Nov-2007": "VOCdevkit/VOC2007",
+ "VOCtest_06-Nov-2007": "VOCdevkit/VOC2007",
+ "annotations_trainval": "annotations"
+ }
+ for k, v in decompress_name_map.items():
+ if fullpath.find(k) >= 0:
+ fullpath = osp.join(osp.split(fullpath)[0], v)
+
+ if osp.exists(fullpath) and check_exist:
+ if not osp.isfile(fullpath) or \
+ _check_exist_file_md5(fullpath, md5sum, url):
+ logger.debug("Found {}".format(fullpath))
+ return fullpath, True
+ else:
+ os.remove(fullpath)
+
+ fullname = _download_dist(url, root_dir, md5sum)
+
+    # weights in the new format (postfix '.pdparams' or '.yml')
+    # do not need decompression
+ if osp.splitext(fullname)[-1] not in ['.pdparams', '.yml']:
+ _decompress_dist(fullname)
+
+ return fullpath, False
+
+
+def download_dataset(path, dataset=None):
+ if dataset not in DATASETS.keys():
+ logger.error("Unknown dataset {}, it should be "
+ "{}".format(dataset, DATASETS.keys()))
+ return
+ dataset_info = DATASETS[dataset][0]
+ for info in dataset_info:
+ get_path(info[0], path, info[1], False)
+ logger.debug("Download dataset {} finished.".format(dataset))
+
+
+def _dataset_exists(path, annotation, image_dir):
+ """
+    Check if the user-defined dataset exists.
+ """
+ if not osp.exists(path):
+ logger.warning("Config dataset_dir {} is not exits, "
+ "dataset config is not valid".format(path))
+ return False
+ if annotation:
+ annotation_path = osp.join(path, annotation)
+ if not osp.isfile(annotation_path):
+ logger.warning("Config annotation {} is not a "
+ "file, dataset config is not "
+ "valid".format(annotation_path))
+ return False
+ if image_dir:
+ image_path = osp.join(path, image_dir)
+ if not osp.isdir(image_path):
+ logger.warning("Config image_dir {} is not a "
+ "directory, dataset config is not "
+ "valid".format(image_path))
+ return False
+ return True
+
+
+def _download(url, path, md5sum=None):
+ """
+ Download from url, save to path.
+
+ url (str): download url
+ path (str): download to given path
+ """
+ if not osp.exists(path):
+ os.makedirs(path)
+
+ fname = osp.split(url)[-1]
+ fullname = osp.join(path, fname)
+ retry_cnt = 0
+
+ while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum,
+ url)):
+ if retry_cnt < DOWNLOAD_RETRY_LIMIT:
+ retry_cnt += 1
+ else:
+ raise RuntimeError("Download from {} failed. "
+ "Retry limit reached".format(url))
+
+ logger.info("Downloading {} from {}".format(fname, url))
+
+ # NOTE: windows path join may incur \, which is invalid in url
+ if sys.platform == "win32":
+ url = url.replace('\\', '/')
+
+ req = requests.get(url, stream=True)
+ if req.status_code != 200:
+ raise RuntimeError("Downloading from {} failed with code "
+ "{}!".format(url, req.status_code))
+
+        # To protect against interrupted downloads, download to
+        # tmp_fullname first and move tmp_fullname to fullname
+        # once the download has finished
+ tmp_fullname = fullname + "_tmp"
+ total_size = req.headers.get('content-length')
+ with open(tmp_fullname, 'wb') as f:
+ if total_size:
+ for chunk in tqdm.tqdm(
+ req.iter_content(chunk_size=1024),
+ total=(int(total_size) + 1023) // 1024,
+ unit='KB'):
+ f.write(chunk)
+ else:
+ for chunk in req.iter_content(chunk_size=1024):
+ if chunk:
+ f.write(chunk)
+ shutil.move(tmp_fullname, fullname)
+ return fullname
+
+
+def _download_dist(url, path, md5sum=None):
+ env = os.environ
+ if 'PADDLE_TRAINERS_NUM' in env and 'PADDLE_TRAINER_ID' in env:
+ trainer_id = int(env['PADDLE_TRAINER_ID'])
+ num_trainers = int(env['PADDLE_TRAINERS_NUM'])
+ if num_trainers <= 1:
+ return _download(url, path, md5sum)
+ else:
+ fname = osp.split(url)[-1]
+ fullname = osp.join(path, fname)
+ lock_path = fullname + '.download.lock'
+
+ if not osp.isdir(path):
+ os.makedirs(path)
+
+ if not osp.exists(fullname):
+ from paddle.distributed import ParallelEnv
+ unique_endpoints = _get_unique_endpoints(ParallelEnv()
+ .trainer_endpoints[:])
+ with open(lock_path, 'w'): # touch
+ os.utime(lock_path, None)
+ if ParallelEnv().current_endpoint in unique_endpoints:
+ _download(url, path, md5sum)
+ os.remove(lock_path)
+ else:
+ while os.path.exists(lock_path):
+ time.sleep(0.5)
+ return fullname
+ else:
+ return _download(url, path, md5sum)
+
+
+def _check_exist_file_md5(filename, md5sum, url):
+    # if md5sum is None and the file to check is a weights file,
+    # read the md5sum from the url headers and check it;
+    # otherwise check against md5sum directly
+ return _md5check_from_url(filename, url) if md5sum is None \
+ and filename.endswith('pdparams') \
+ else _md5check(filename, md5sum)
+
+
+def _md5check_from_url(filename, url):
+    # For weights at bcebos URLs, the MD5 value is carried in the
+    # response header as 'content-md5'
+ req = requests.get(url, stream=True)
+ content_md5 = req.headers.get('content-md5')
+ req.close()
+ if not content_md5 or _md5check(
+ filename,
+ binascii.hexlify(base64.b64decode(content_md5.strip('"'))).decode(
+ )):
+ return True
+ else:
+ return False
+
+
+def _md5check(fullname, md5sum=None):
+ if md5sum is None:
+ return True
+
+ logger.debug("File {} md5 checking...".format(fullname))
+ md5 = hashlib.md5()
+ with open(fullname, 'rb') as f:
+ for chunk in iter(lambda: f.read(4096), b""):
+ md5.update(chunk)
+ calc_md5sum = md5.hexdigest()
+
+ if calc_md5sum != md5sum:
+ logger.warning("File {} md5 check failed, {}(calc) != "
+ "{}(base)".format(fullname, calc_md5sum, md5sum))
+ return False
+ return True
+
+
+def _decompress(fname):
+ """
+ Decompress for zip and tar file
+ """
+ logger.info("Decompressing {}...".format(fname))
+
+    # To protect against interrupted decompression, decompress
+    # into the fpath_tmp directory first; if decompression
+    # succeeds, move the files to fpath, then delete fpath_tmp
+    # and remove the downloaded archive.
+ fpath = osp.split(fname)[0]
+ fpath_tmp = osp.join(fpath, 'tmp')
+ if osp.isdir(fpath_tmp):
+ shutil.rmtree(fpath_tmp)
+ os.makedirs(fpath_tmp)
+
+ if fname.find('tar') >= 0:
+ with tarfile.open(fname) as tf:
+ tf.extractall(path=fpath_tmp)
+ elif fname.find('zip') >= 0:
+ with zipfile.ZipFile(fname) as zf:
+ zf.extractall(path=fpath_tmp)
+ elif fname.find('.txt') >= 0:
+ return
+ else:
+ raise TypeError("Unsupport compress file type {}".format(fname))
+
+ for f in os.listdir(fpath_tmp):
+ src_dir = osp.join(fpath_tmp, f)
+ dst_dir = osp.join(fpath, f)
+ _move_and_merge_tree(src_dir, dst_dir)
+
+ shutil.rmtree(fpath_tmp)
+ os.remove(fname)
+
+
+def _decompress_dist(fname):
+ env = os.environ
+ if 'PADDLE_TRAINERS_NUM' in env and 'PADDLE_TRAINER_ID' in env:
+ trainer_id = int(env['PADDLE_TRAINER_ID'])
+ num_trainers = int(env['PADDLE_TRAINERS_NUM'])
+ if num_trainers <= 1:
+ _decompress(fname)
+ else:
+ lock_path = fname + '.decompress.lock'
+ from paddle.distributed import ParallelEnv
+ unique_endpoints = _get_unique_endpoints(ParallelEnv()
+ .trainer_endpoints[:])
+            # NOTE(dkp): _decompress_dist is always performed after
+            # _download_dist. In _download_dist, sub-trainers wait for
+            # the download lock file to be released by sleeping; if
+            # decompression is very fast and finishes within that
+            # sleeping gap (e.g. for tiny datasets such as coco_ce or
+            # spine_coco), the main trainer may finish decompressing
+            # and release the lock file too early. So we only create
+            # the lock file in the main trainer, and all sub-trainers
+            # wait 1s (twice the sleeping gap) for the main trainer to
+            # create it; this keeps all trainer pipelines in order.
+            # **change this if you have a more elegant method**
+ if ParallelEnv().current_endpoint in unique_endpoints:
+ with open(lock_path, 'w'): # touch
+ os.utime(lock_path, None)
+ _decompress(fname)
+ os.remove(lock_path)
+ else:
+ time.sleep(1)
+ while os.path.exists(lock_path):
+ time.sleep(0.5)
+ else:
+ _decompress(fname)
+
+
+def _move_and_merge_tree(src, dst):
+ """
+    Move the src directory to dst; if dst already exists,
+    merge src into dst
+ """
+ if not osp.exists(dst):
+ shutil.move(src, dst)
+ elif osp.isfile(src):
+ shutil.move(src, dst)
+ else:
+ for fp in os.listdir(src):
+ src_fp = osp.join(src, fp)
+ dst_fp = osp.join(dst, fp)
+ if osp.isdir(src_fp):
+ if osp.isdir(dst_fp):
+ _move_and_merge_tree(src_fp, dst_fp)
+ else:
+ shutil.move(src_fp, dst_fp)
+ elif osp.isfile(src_fp) and \
+ not osp.isfile(dst_fp):
+ shutil.move(src_fp, dst_fp)
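As a concrete illustration of the mapping logic above, this standalone sketch replays `map_path` on a VOC archive URL; the URL and cache root are invented for illustration, while the logic is copied from the function as defined in this file:

import os.path as osp

def map_path(url, root_dir, path_depth=1):
    # keep the last path_depth components of the URL as the relative path
    dirname = url
    for _ in range(path_depth):
        dirname = osp.dirname(dirname)
    fpath = osp.relpath(url, dirname)
    # strip archive suffixes so the path names the decompressed target
    for ext in ['.zip', '.tar', '.gz']:
        fpath = fpath.replace(ext, '')
    return osp.join(root_dir, fpath)

print(map_path('https://example.com/VOCtrainval_11-May-2012.tar',
               '/data/cache'))
# -> /data/cache/VOCtrainval_11-May-2012, which decompress_name_map
#    then rewrites to /data/cache/VOCdevkit/VOC2012 inside get_path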
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/logger.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/logger.py
new file mode 100644
index 000000000..51e296205
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/logger.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import sys
+
+import paddle.distributed as dist
+
+__all__ = ['setup_logger']
+
+logger_initialized = []
+
+
+def setup_logger(name="ppdet", output=None):
+ """
+ Initialize logger and set its verbosity level to INFO.
+ Args:
+ output (str): a file name or a directory to save log. If None, will not save log file.
+ If ends with ".txt" or ".log", assumed to be a file name.
+ Otherwise, logs will be saved to `output/log.txt`.
+ name (str): the root module name of this logger
+
+ Returns:
+ logging.Logger: a logger
+ """
+ logger = logging.getLogger(name)
+ if name in logger_initialized:
+ return logger
+
+ logger.setLevel(logging.INFO)
+ logger.propagate = False
+
+ formatter = logging.Formatter(
+ "[%(asctime)s] %(name)s %(levelname)s: %(message)s",
+ datefmt="%m/%d %H:%M:%S")
+ # stdout logging: master only
+ local_rank = dist.get_rank()
+ if local_rank == 0:
+ ch = logging.StreamHandler(stream=sys.stdout)
+ ch.setLevel(logging.DEBUG)
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+
+ # file logging: all workers
+ if output is not None:
+ if output.endswith(".txt") or output.endswith(".log"):
+ filename = output
+ else:
+ filename = os.path.join(output, "log.txt")
+ if local_rank > 0:
+ filename = filename + ".rank{}".format(local_rank)
+            os.makedirs(os.path.dirname(filename), exist_ok=True)
+ fh = logging.FileHandler(filename, mode='a')
+ fh.setLevel(logging.DEBUG)
+ fh.setFormatter(logging.Formatter())
+ logger.addHandler(fh)
+ logger_initialized.append(name)
+ return logger
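A minimal usage sketch for `setup_logger` (the 'output' directory name is an illustrative choice): rank 0 logs to stdout, and every rank appends to its own log file.

from ppdet.utils.logger import setup_logger

logger = setup_logger('ppdet', output='output')  # writes output/log.txt
logger.info('training started')
# calling again with the same name returns the already-configured logger
assert setup_logger('ppdet') is logger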
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/profiler.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/profiler.py
new file mode 100644
index 000000000..cae3773fa
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/profiler.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import paddle
+
+# A global variable to record the number of calling times for profiler
+# functions. It is used to specify the tracing range of training steps.
+_profiler_step_id = 0
+
+# A global variable to avoid parsing from string every time.
+_profiler_options = None
+
+
+class ProfilerOptions(object):
+ '''
+ Use a string to initialize a ProfilerOptions.
+ The string should be in the format: "key1=value1;key2=value;key3=value3".
+ For example:
+ "profile_path=model.profile"
+ "batch_range=[50, 60]; profile_path=model.profile"
+ "batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile"
+
+    ProfilerOptions supports the following key-value pairs:
+      batch_range - an integer list, e.g. [100, 110].
+      state - a string, the optional values are 'CPU', 'GPU' or 'All'.
+      sorted_key - a string, the optional values are 'calls', 'total',
+                   'max', 'min' or 'ave'.
+ tracer_option - a string, the optional values are 'Default', 'OpDetail',
+ 'AllOpDetail'.
+ profile_path - a string, the path to save the serialized profile data,
+ which can be used to generate a timeline.
+ exit_on_finished - a boolean.
+ '''
+
+ def __init__(self, options_str):
+ assert isinstance(options_str, str)
+
+ self._options = {
+ 'batch_range': [10, 20],
+ 'state': 'All',
+ 'sorted_key': 'total',
+ 'tracer_option': 'Default',
+ 'profile_path': '/tmp/profile',
+ 'exit_on_finished': True
+ }
+ self._parse_from_string(options_str)
+
+ def _parse_from_string(self, options_str):
+ for kv in options_str.replace(' ', '').split(';'):
+ key, value = kv.split('=')
+ if key == 'batch_range':
+ value_list = value.replace('[', '').replace(']', '').split(',')
+ value_list = list(map(int, value_list))
+ if len(value_list) >= 2 and value_list[0] >= 0 and value_list[
+ 1] > value_list[0]:
+ self._options[key] = value_list
+ elif key == 'exit_on_finished':
+ self._options[key] = value.lower() in ("yes", "true", "t", "1")
+ elif key in [
+ 'state', 'sorted_key', 'tracer_option', 'profile_path'
+ ]:
+ self._options[key] = value
+
+ def __getitem__(self, name):
+ if self._options.get(name, None) is None:
+ raise ValueError(
+ "ProfilerOptions does not have an option named %s." % name)
+ return self._options[name]
+
+
+def add_profiler_step(options_str=None):
+ '''
+ Enable the operator-level timing using PaddlePaddle's profiler.
+    The profiler uses an independent variable to count the profiler steps.
+    One call of this function is treated as a profiler step.
+
+    Args:
+        options_str - a string to initialize the ProfilerOptions.
+                      Default is None, and the profiler is disabled.
+ '''
+ if options_str is None:
+ return
+
+ global _profiler_step_id
+ global _profiler_options
+
+ if _profiler_options is None:
+ _profiler_options = ProfilerOptions(options_str)
+
+ if _profiler_step_id == _profiler_options['batch_range'][0]:
+ paddle.utils.profiler.start_profiler(_profiler_options['state'],
+ _profiler_options['tracer_option'])
+ elif _profiler_step_id == _profiler_options['batch_range'][1]:
+ paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'],
+ _profiler_options['profile_path'])
+ if _profiler_options['exit_on_finished']:
+ sys.exit(0)
+
+ _profiler_step_id += 1
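For example, calling `add_profiler_step` once per iteration drives the whole lifecycle; with the illustrative options string below, profiling covers steps 50 to 60 and the process exits afterwards (`train_one_step` is a hypothetical stand-in for the real training step):

from ppdet.utils.profiler import add_profiler_step

options = "batch_range=[50, 60]; state=GPU; profile_path=model.profile"
for step in range(100):
    add_profiler_step(options)  # one call counts as one profiler step
    train_one_step()            # hypothetical training step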
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/stats.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/stats.py
new file mode 100644
index 000000000..4cd36d91c
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/stats.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import numpy as np
+
+__all__ = ['SmoothedValue', 'TrainingStats']
+
+
+class SmoothedValue(object):
+ """Track a series of values and provide access to smoothed values over a
+ window or the global series average.
+ """
+
+ def __init__(self, window_size=20, fmt=None):
+ if fmt is None:
+ fmt = "{median:.4f} ({avg:.4f})"
+ self.deque = collections.deque(maxlen=window_size)
+ self.fmt = fmt
+ self.total = 0.
+ self.count = 0
+
+ def update(self, value, n=1):
+ self.deque.append(value)
+ self.count += n
+ self.total += value * n
+
+ @property
+ def median(self):
+ return np.median(self.deque)
+
+ @property
+ def avg(self):
+ return np.mean(self.deque)
+
+ @property
+ def max(self):
+ return np.max(self.deque)
+
+ @property
+ def value(self):
+ return self.deque[-1]
+
+ @property
+ def global_avg(self):
+ return self.total / self.count
+
+ def __str__(self):
+ return self.fmt.format(
+ median=self.median, avg=self.avg, max=self.max, value=self.value)
+
+
+class TrainingStats(object):
+ def __init__(self, window_size, delimiter=' '):
+ self.meters = None
+ self.window_size = window_size
+ self.delimiter = delimiter
+
+ def update(self, stats):
+ if self.meters is None:
+ self.meters = {
+ k: SmoothedValue(self.window_size)
+ for k in stats.keys()
+ }
+ for k, v in self.meters.items():
+ v.update(stats[k].numpy())
+
+ def get(self, extras=None):
+ stats = collections.OrderedDict()
+ if extras:
+ for k, v in extras.items():
+ stats[k] = v
+ for k, v in self.meters.items():
+ stats[k] = format(v.median, '.6f')
+
+ return stats
+
+ def log(self, extras=None):
+ d = self.get(extras)
+ strs = []
+ for k, v in d.items():
+ strs.append("{}: {}".format(k, str(v)))
+ return self.delimiter.join(strs)
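A short usage sketch of `TrainingStats`; note that `update` calls `.numpy()` on each value, so losses are passed as paddle tensors (the values here are synthetic):

import paddle
from ppdet.utils.stats import TrainingStats

stats = TrainingStats(window_size=20)
for step in range(1, 101):
    stats.update({'loss': paddle.to_tensor(1.0 / step)})
    if step % 20 == 0:
        # prints e.g. "epoch: 0 loss: <median over the window>"
        print(stats.log({'epoch': 0}))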
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/visualizer.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/visualizer.py
new file mode 100644
index 000000000..fdfd966e2
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/visualizer.py
@@ -0,0 +1,321 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import numpy as np
+from PIL import Image, ImageDraw
+import cv2
+import math
+
+from .colormap import colormap
+from ppdet.utils.logger import setup_logger
+logger = setup_logger(__name__)
+
+__all__ = ['visualize_results']
+
+
+def visualize_results(image,
+ bbox_res,
+ mask_res,
+ segm_res,
+ keypoint_res,
+ im_id,
+ catid2name,
+ threshold=0.5):
+ """
+ Visualize bbox and mask results
+ """
+ if bbox_res is not None:
+ image = draw_bbox(image, im_id, catid2name, bbox_res, threshold)
+ if mask_res is not None:
+ image = draw_mask(image, im_id, mask_res, threshold)
+ if segm_res is not None:
+ image = draw_segm(image, im_id, catid2name, segm_res, threshold)
+ if keypoint_res is not None:
+ image = draw_pose(image, keypoint_res, threshold)
+ return image
+
+
+def draw_mask(image, im_id, segms, threshold, alpha=0.7):
+ """
+ Draw mask on image
+ """
+ mask_color_id = 0
+ w_ratio = .4
+ color_list = colormap(rgb=True)
+ img_array = np.array(image).astype('float32')
+ for dt in np.array(segms):
+ if im_id != dt['image_id']:
+ continue
+ segm, score = dt['segmentation'], dt['score']
+ if score < threshold:
+ continue
+ import pycocotools.mask as mask_util
+ mask = mask_util.decode(segm) * 255
+ color_mask = color_list[mask_color_id % len(color_list), 0:3]
+ mask_color_id += 1
+ for c in range(3):
+ color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
+ idx = np.nonzero(mask)
+ img_array[idx[0], idx[1], :] *= 1.0 - alpha
+ img_array[idx[0], idx[1], :] += alpha * color_mask
+ return Image.fromarray(img_array.astype('uint8'))
+
+
+def draw_bbox(image, im_id, catid2name, bboxes, threshold):
+ """
+ Draw bbox on image
+ """
+ draw = ImageDraw.Draw(image)
+
+ catid2color = {}
+ color_list = colormap(rgb=True)[:40]
+ for dt in np.array(bboxes):
+ if im_id != dt['image_id']:
+ continue
+ catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']
+ if score < threshold:
+ continue
+
+ if catid not in catid2color:
+ idx = np.random.randint(len(color_list))
+ catid2color[catid] = color_list[idx]
+ color = tuple(catid2color[catid])
+
+ # draw bbox
+ if len(bbox) == 4:
+ # draw bbox
+ xmin, ymin, w, h = bbox
+ xmax = xmin + w
+ ymax = ymin + h
+ draw.line(
+ [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
+ (xmin, ymin)],
+ width=2,
+ fill=color)
+ elif len(bbox) == 8:
+ x1, y1, x2, y2, x3, y3, x4, y4 = bbox
+ draw.line(
+ [(x1, y1), (x2, y2), (x3, y3), (x4, y4), (x1, y1)],
+ width=2,
+ fill=color)
+ xmin = min(x1, x2, x3, x4)
+ ymin = min(y1, y2, y3, y4)
+ else:
+ logger.error('the shape of bbox must be [M, 4] or [M, 8]!')
+
+ # draw label
+ text = "{} {:.2f}".format(catid2name[catid], score)
+ tw, th = draw.textsize(text)
+ draw.rectangle(
+ [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)
+ draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
+
+ return image
+
+
+def save_result(save_path, results, catid2name, threshold):
+ """
+ save result as txt
+ """
+ img_id = int(results["im_id"])
+ with open(save_path, 'w') as f:
+ if "bbox_res" in results:
+ for dt in results["bbox_res"]:
+ catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']
+ if score < threshold:
+ continue
+ # each bbox result as a line
+ # for rbox: classname score x1 y1 x2 y2 x3 y3 x4 y4
+ # for bbox: classname score x1 y1 w h
+ bbox_pred = '{} {} '.format(catid2name[catid],
+ score) + ' '.join(
+ [str(e) for e in bbox])
+ f.write(bbox_pred + '\n')
+ elif "keypoint_res" in results:
+ for dt in results["keypoint_res"]:
+ kpts = dt['keypoints']
+ scores = dt['score']
+ keypoint_pred = [img_id, scores, kpts]
+ print(keypoint_pred, file=f)
+ else:
+ print("No valid results found, skip txt save")
+
+
+def draw_segm(image,
+ im_id,
+ catid2name,
+ segms,
+ threshold,
+ alpha=0.7,
+ draw_box=True):
+ """
+ Draw segmentation on image
+ """
+ mask_color_id = 0
+ w_ratio = .4
+ color_list = colormap(rgb=True)
+ img_array = np.array(image).astype('float32')
+ for dt in np.array(segms):
+ if im_id != dt['image_id']:
+ continue
+ segm, score, catid = dt['segmentation'], dt['score'], dt['category_id']
+ if score < threshold:
+ continue
+ import pycocotools.mask as mask_util
+ mask = mask_util.decode(segm) * 255
+ color_mask = color_list[mask_color_id % len(color_list), 0:3]
+ mask_color_id += 1
+ for c in range(3):
+ color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
+ idx = np.nonzero(mask)
+ img_array[idx[0], idx[1], :] *= 1.0 - alpha
+ img_array[idx[0], idx[1], :] += alpha * color_mask
+
+ if not draw_box:
+            # local import, as with pycocotools above; needed for the
+            # mask center of mass
+            from scipy import ndimage
+            center_y, center_x = ndimage.measurements.center_of_mass(mask)
+ label_text = "{}".format(catid2name[catid])
+ vis_pos = (max(int(center_x) - 10, 0), int(center_y))
+ cv2.putText(img_array, label_text, vis_pos,
+ cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 255, 255))
+ else:
+ mask = mask_util.decode(segm) * 255
+ sum_x = np.sum(mask, axis=0)
+ x = np.where(sum_x > 0.5)[0]
+ sum_y = np.sum(mask, axis=1)
+ y = np.where(sum_y > 0.5)[0]
+ x0, x1, y0, y1 = x[0], x[-1], y[0], y[-1]
+ cv2.rectangle(img_array, (x0, y0), (x1, y1),
+ tuple(color_mask.astype('int32').tolist()), 1)
+ bbox_text = '%s %.2f' % (catid2name[catid], score)
+ t_size = cv2.getTextSize(bbox_text, 0, 0.3, thickness=1)[0]
+ cv2.rectangle(img_array, (x0, y0), (x0 + t_size[0],
+ y0 - t_size[1] - 3),
+ tuple(color_mask.astype('int32').tolist()), -1)
+ cv2.putText(
+ img_array,
+ bbox_text, (x0, y0 - 2),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.3, (0, 0, 0),
+ 1,
+ lineType=cv2.LINE_AA)
+
+ return Image.fromarray(img_array.astype('uint8'))
+
+
+def draw_pose(image,
+ results,
+ visual_thread=0.6,
+ save_name='pose.jpg',
+ save_dir='output',
+ returnimg=False,
+ ids=None):
+ try:
+ import matplotlib.pyplot as plt
+ import matplotlib
+ plt.switch_backend('agg')
+ except Exception as e:
+        logger.error('Matplotlib not found, please install matplotlib, '
+                     'for example: `pip install matplotlib`.')
+ raise e
+
+ skeletons = np.array([item['keypoints'] for item in results])
+ kpt_nums = 17
+ if len(skeletons) > 0:
+ kpt_nums = int(skeletons.shape[1] / 3)
+ skeletons = skeletons.reshape(-1, kpt_nums, 3)
+ if kpt_nums == 17: #plot coco keypoint
+ EDGES = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 8),
+ (7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14),
+ (13, 15), (14, 16), (11, 12)]
+ else: #plot mpii keypoint
+ EDGES = [(0, 1), (1, 2), (3, 4), (4, 5), (2, 6), (3, 6), (6, 7), (7, 8),
+ (8, 9), (10, 11), (11, 12), (13, 14), (14, 15), (8, 12),
+ (8, 13)]
+ NUM_EDGES = len(EDGES)
+
+ colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
+ [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
+ [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
+ cmap = matplotlib.cm.get_cmap('hsv')
+ plt.figure()
+
+ img = np.array(image).astype('float32')
+
+ color_set = results['colors'] if 'colors' in results else None
+
+ if 'bbox' in results and ids is None:
+ bboxs = results['bbox']
+ for j, rect in enumerate(bboxs):
+ xmin, ymin, xmax, ymax = rect
+ color = colors[0] if color_set is None else colors[color_set[j] %
+ len(colors)]
+ cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 1)
+
+ canvas = img.copy()
+ for i in range(kpt_nums):
+ for j in range(len(skeletons)):
+ if skeletons[j][i, 2] < visual_thread:
+ continue
+ if ids is None:
+ color = colors[i] if color_set is None else colors[color_set[j]
+ %
+ len(colors)]
+ else:
+ color = get_color(ids[j])
+
+ cv2.circle(
+ canvas,
+ tuple(skeletons[j][i, 0:2].astype('int32')),
+ 2,
+ color,
+ thickness=-1)
+
+ to_plot = cv2.addWeighted(img, 0.3, canvas, 0.7, 0)
+ fig = matplotlib.pyplot.gcf()
+
+ stickwidth = 2
+
+ for i in range(NUM_EDGES):
+ for j in range(len(skeletons)):
+ edge = EDGES[i]
+ if skeletons[j][edge[0], 2] < visual_thread or skeletons[j][edge[
+ 1], 2] < visual_thread:
+ continue
+
+ cur_canvas = canvas.copy()
+ X = [skeletons[j][edge[0], 1], skeletons[j][edge[1], 1]]
+ Y = [skeletons[j][edge[0], 0], skeletons[j][edge[1], 0]]
+ mX = np.mean(X)
+ mY = np.mean(Y)
+ length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
+ angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
+ polygon = cv2.ellipse2Poly((int(mY), int(mX)),
+ (int(length / 2), stickwidth),
+ int(angle), 0, 360, 1)
+ if ids is None:
+ color = colors[i] if color_set is None else colors[color_set[j]
+ %
+ len(colors)]
+ else:
+ color = get_color(ids[j])
+ cv2.fillConvexPoly(cur_canvas, polygon, color)
+ canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
+ image = Image.fromarray(canvas.astype('uint8'))
+ plt.close()
+ return image
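A minimal sketch of calling `visualize_results` with COCO-style bbox results only (the image file and category map are invented for illustration):

from PIL import Image
from ppdet.utils.visualizer import visualize_results

image = Image.open('demo.jpg').convert('RGB')   # any test image
bbox_res = [{'image_id': 0, 'category_id': 1,
             'bbox': [50., 60., 120., 80.],     # x, y, w, h
             'score': 0.9}]
vis = visualize_results(image, bbox_res, None, None, None,
                        im_id=0, catid2name={1: 'electromobile'},
                        threshold=0.5)
vis.save('demo_vis.jpg')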
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/voc_utils.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/voc_utils.py
new file mode 100644
index 000000000..cd6d9f90e
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/ppdet/utils/voc_utils.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import os.path as osp
+import re
+import random
+
+__all__ = ['create_list']
+
+
+def create_list(devkit_dir, years, output_dir):
+ """
+    create the following lists:
+ 1. trainval.txt
+ 2. test.txt
+ """
+ trainval_list = []
+ test_list = []
+ for year in years:
+ trainval, test = _walk_voc_dir(devkit_dir, year, output_dir)
+ trainval_list.extend(trainval)
+ test_list.extend(test)
+
+ random.shuffle(trainval_list)
+ with open(osp.join(output_dir, 'trainval.txt'), 'w') as ftrainval:
+ for item in trainval_list:
+ ftrainval.write(item[0] + ' ' + item[1] + '\n')
+
+ with open(osp.join(output_dir, 'test.txt'), 'w') as fval:
+ ct = 0
+ for item in test_list:
+ ct += 1
+ fval.write(item[0] + ' ' + item[1] + '\n')
+
+
+def _get_voc_dir(devkit_dir, year, type):
+ return osp.join(devkit_dir, 'VOC' + year, type)
+
+
+def _walk_voc_dir(devkit_dir, year, output_dir):
+ filelist_dir = _get_voc_dir(devkit_dir, year, 'ImageSets/Main')
+ annotation_dir = _get_voc_dir(devkit_dir, year, 'Annotations')
+ img_dir = _get_voc_dir(devkit_dir, year, 'JPEGImages')
+ trainval_list = []
+ test_list = []
+ added = set()
+
+ for _, _, files in os.walk(filelist_dir):
+ for fname in files:
+ img_ann_list = []
+ if re.match(r'[a-z]+_trainval\.txt', fname):
+ img_ann_list = trainval_list
+ elif re.match(r'[a-z]+_test\.txt', fname):
+ img_ann_list = test_list
+ else:
+ continue
+ fpath = osp.join(filelist_dir, fname)
+ for line in open(fpath):
+ name_prefix = line.strip().split()[0]
+ if name_prefix in added:
+ continue
+ added.add(name_prefix)
+ ann_path = osp.join(
+ osp.relpath(annotation_dir, output_dir),
+ name_prefix + '.xml')
+ img_path = osp.join(
+ osp.relpath(img_dir, output_dir), name_prefix + '.jpg')
+ img_ann_list.append((img_path, ann_path))
+
+ return trainval_list, test_list
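Usage sketch for `create_list` (the directory layout matches the auto-download layout above; the paths themselves are illustrative):

from ppdet.utils.voc_utils import create_list

# expects dataset/voc/VOCdevkit/VOC2007 and .../VOC2012 to exist
create_list('dataset/voc/VOCdevkit', ['2007', '2012'], 'dataset/voc')
# writes dataset/voc/trainval.txt and dataset/voc/test.txt, one
# "<image path> <annotation path>" pair per line, relative to output_dir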
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__init__.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/__init__.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 000000000..0f5e2594f
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/det_preprocess.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/det_preprocess.cpython-37.pyc
new file mode 100644
index 000000000..ee6910208
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/det_preprocess.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/postprocess.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/postprocess.cpython-37.pyc
new file mode 100644
index 000000000..fd7a26257
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/postprocess.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/predict_det.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/predict_det.cpython-37.pyc
new file mode 100644
index 000000000..882ceb3dc
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/predict_det.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/predict_rec.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/predict_rec.cpython-37.pyc
new file mode 100644
index 000000000..1e9a82a46
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/predict_rec.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/preprocess.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/preprocess.cpython-37.pyc
new file mode 100644
index 000000000..bd2f03afd
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/__pycache__/preprocess.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/build_gallery.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/build_gallery.py
new file mode 100644
index 000000000..7b69a04d7
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/build_gallery.py
@@ -0,0 +1,214 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import cv2
+import faiss
+import numpy as np
+from tqdm import tqdm
+import pickle
+
+from python.predict_rec import RecPredictor
+
+from utils import logger
+from utils import config
+
+
+def split_datafile(data_file, image_root, delimiter="\t"):
+ '''
+    data_file: file of image paths and docs, with fields separated by delimiter
+    image_root: root directory of the images
+    delimiter: field delimiter
+ '''
+ gallery_images = []
+ gallery_docs = []
+ with open(data_file, 'r', encoding='utf-8') as f:
+ lines = f.readlines()
+ for _, ori_line in enumerate(lines):
+ line = ori_line.strip().split(delimiter)
+ text_num = len(line)
+ assert text_num >= 2, f"line({ori_line}) must be splitted into at least 2 parts, but got {text_num}"
+ image_file = os.path.join(image_root, line[0])
+
+ gallery_images.append(image_file)
+ gallery_docs.append(ori_line.strip())
+
+ return gallery_images, gallery_docs
+
+
+class GalleryBuilder(object):
+ def __init__(self, config):
+
+ self.config = config
+ self.rec_predictor = RecPredictor(config)
+ assert 'IndexProcess' in config.keys(), "Index config not found ... "
+ self.build(config['IndexProcess'])
+
+ def build(self, config):
+ '''
+ build index from scratch
+ '''
+ operation_method = config.get("index_operation", "new").lower()
+
+ gallery_images, gallery_docs = split_datafile(
+ config['data_file'], config['image_root'], config['delimiter'])
+
+        # when removing data from the index, there is no need to extract features
+ if operation_method != "remove":
+ gallery_features = self._extract_features(gallery_images, config)
+ assert operation_method in [
+ "new", "remove", "append"
+ ], "Only append, remove and new operation are supported"
+
+ # vector.index: faiss index file
+ # id_map.pkl: use this file to map id to image_doc
+ if operation_method in ["remove", "append"]:
+ # if remove or append, vector.index and id_map.pkl must exist
+            assert os.path.exists(
+                os.path.join(config["index_dir"], "vector.index")
+            ), "The vector.index does not exist in {} when 'index_operation' is not None".format(
+                config["index_dir"])
+            assert os.path.exists(
+                os.path.join(config["index_dir"], "id_map.pkl")
+            ), "The id_map.pkl does not exist in {} when 'index_operation' is not None".format(
+                config["index_dir"])
+ index = faiss.read_index(
+ os.path.join(config["index_dir"], "vector.index"))
+ with open(os.path.join(config["index_dir"], "id_map.pkl"),
+ 'rb') as fd:
+ ids = pickle.load(fd)
+            assert index.ntotal == len(ids.keys(
+            )), "the number of vectors in the index does not match id_map"
+ else:
+ if not os.path.exists(config["index_dir"]):
+ os.makedirs(config["index_dir"], exist_ok=True)
+ index_method = config.get("index_method", "HNSW32")
+
+            # if using the IVF method, calculate the number of inverted lists automatically
+ if index_method == "IVF":
+ index_method = index_method + str(
+ min(int(len(gallery_images) // 8), 65536)) + ",Flat"
+
+            # for a binary index, prepend 'B' to index_method
+ if config["dist_type"] == "hamming":
+ index_method = "B" + index_method
+
+            # distance type
+ dist_type = faiss.METRIC_INNER_PRODUCT if config[
+ "dist_type"] == "IP" else faiss.METRIC_L2
+
+            # build the index
+ if config["dist_type"] == "hamming":
+ index = faiss.index_binary_factory(config["embedding_size"],
+ index_method)
+ else:
+ index = faiss.index_factory(config["embedding_size"],
+ index_method, dist_type)
+ index = faiss.IndexIDMap2(index)
+ ids = {}
+
+ if config["index_method"] == "HNSW32":
+ logger.warning(
+ "The HNSW32 method dose not support 'remove' operation")
+
+ if operation_method != "remove":
+ # calculate id for new data
+ start_id = max(ids.keys()) + 1 if ids else 0
+ ids_now = (
+ np.arange(0, len(gallery_images)) + start_id).astype(np.int64)
+
+ # only train when new index file
+ if operation_method == "new":
+ if config["dist_type"] == "hamming":
+ index.add(gallery_features)
+ else:
+ index.train(gallery_features)
+
+ if not config["dist_type"] == "hamming":
+ index.add_with_ids(gallery_features, ids_now)
+
+ for i, d in zip(list(ids_now), gallery_docs):
+ ids[i] = d
+ else:
+ if config["index_method"] == "HNSW32":
+ raise RuntimeError(
+ "The index_method: HNSW32 dose not support 'remove' operation"
+ )
+ # remove ids in id_map, remove index data in faiss index
+ remove_ids = list(
+ filter(lambda k: ids.get(k) in gallery_docs, ids.keys()))
+ remove_ids = np.asarray(remove_ids)
+ index.remove_ids(remove_ids)
+ for k in remove_ids:
+ del ids[k]
+
+ # store faiss index file and id_map file
+ if config["dist_type"] == "hamming":
+ faiss.write_index_binary(
+ index, os.path.join(config["index_dir"], "vector.index"))
+ else:
+ faiss.write_index(
+ index, os.path.join(config["index_dir"], "vector.index"))
+
+ with open(os.path.join(config["index_dir"], "id_map.pkl"), 'wb') as fd:
+ pickle.dump(ids, fd)
+
+ def _extract_features(self, gallery_images, config):
+ # extract gallery features
+ if config["dist_type"] == "hamming":
+ gallery_features = np.zeros(
+ [len(gallery_images), config['embedding_size'] // 8],
+ dtype=np.uint8)
+ else:
+ gallery_features = np.zeros(
+ [len(gallery_images), config['embedding_size']],
+ dtype=np.float32)
+
+        # construct image batches and run inference
+ batch_size = config.get("batch_size", 32)
+ batch_img = []
+ for i, image_file in enumerate(tqdm(gallery_images)):
+ img = cv2.imread(image_file)
+ if img is None:
+ logger.error("img empty, please check {}".format(image_file))
+ exit()
+ img = img[:, :, ::-1]
+ batch_img.append(img)
+
+ if (i + 1) % batch_size == 0:
+ rec_feat = self.rec_predictor.predict(batch_img)
+ gallery_features[i - batch_size + 1:i + 1, :] = rec_feat
+ batch_img = []
+
+ if len(batch_img) > 0:
+ rec_feat = self.rec_predictor.predict(batch_img)
+ gallery_features[-len(batch_img):, :] = rec_feat
+ batch_img = []
+
+ return gallery_features
+
+
+def main(config):
+ GalleryBuilder(config)
+ return
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ main(config)
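For the "new" operation, `build()` reduces to a handful of faiss calls; this standalone sketch uses random features and illustrative sizes rather than real `RecPredictor` output:

import faiss
import numpy as np

embedding_size, num_images = 512, 1000
feats = np.random.rand(num_images, embedding_size).astype(np.float32)

index = faiss.index_factory(embedding_size, "HNSW32",
                            faiss.METRIC_INNER_PRODUCT)
index = faiss.IndexIDMap2(index)  # allows addressing vectors by id
ids = np.arange(num_images).astype(np.int64)
index.train(feats)                # trivial for HNSW, required for IVF
index.add_with_ids(feats, ids)
faiss.write_index(index, "vector.index")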
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/det_preprocess.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/det_preprocess.py
new file mode 100644
index 000000000..65db32dc3
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/det_preprocess.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cv2
+import numpy as np
+
+
+def decode_image(im_file, im_info):
+ """read rgb image
+ Args:
+ im_file (str|np.ndarray): input can be image path or np.ndarray
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ if isinstance(im_file, str):
+ with open(im_file, 'rb') as f:
+ im_read = f.read()
+ data = np.frombuffer(im_read, dtype='uint8')
+ im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
+ im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+ else:
+ im = im_file
+ im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
+ im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
+ return im, im_info
+
+
+class DetResize(object):
+ """resize image by target_size and max_size
+ Args:
+ target_size (int): the target size of image
+        keep_ratio (bool): whether to keep the aspect ratio, default True
+ interp (int): method of resize
+ """
+
+ def __init__(
+ self,
+ target_size,
+ keep_ratio=True,
+ interp=cv2.INTER_LINEAR, ):
+ if isinstance(target_size, int):
+ target_size = [target_size, target_size]
+ self.target_size = target_size
+ self.keep_ratio = keep_ratio
+ self.interp = interp
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ assert len(self.target_size) == 2
+ assert self.target_size[0] > 0 and self.target_size[1] > 0
+ im_channel = im.shape[2]
+ im_scale_y, im_scale_x = self.generate_scale(im)
+ # set image_shape
+ im_info['input_shape'][1] = int(im_scale_y * im.shape[0])
+ im_info['input_shape'][2] = int(im_scale_x * im.shape[1])
+ im = cv2.resize(
+ im,
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=self.interp)
+ im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
+ im_info['scale_factor'] = np.array(
+ [im_scale_y, im_scale_x]).astype('float32')
+ return im, im_info
+
+ def generate_scale(self, im):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ Returns:
+ im_scale_x: the resize ratio of X
+ im_scale_y: the resize ratio of Y
+ """
+ origin_shape = im.shape[:2]
+ im_c = im.shape[2]
+ if self.keep_ratio:
+ im_size_min = np.min(origin_shape)
+ im_size_max = np.max(origin_shape)
+ target_size_min = np.min(self.target_size)
+ target_size_max = np.max(self.target_size)
+ im_scale = float(target_size_min) / float(im_size_min)
+ if np.round(im_scale * im_size_max) > target_size_max:
+ im_scale = float(target_size_max) / float(im_size_max)
+ im_scale_x = im_scale
+ im_scale_y = im_scale
+ else:
+ resize_h, resize_w = self.target_size
+ im_scale_y = resize_h / float(origin_shape[0])
+ im_scale_x = resize_w / float(origin_shape[1])
+ return im_scale_y, im_scale_x
+
+
+class DetNormalizeImage(object):
+ """normalize image
+ Args:
+        mean (list): per-channel mean subtracted from the image
+        std (list): per-channel std the image is divided by
+        is_scale (bool): whether to scale the image by 1/255 first
+ """
+
+ def __init__(self, mean, std, is_scale=True):
+ self.mean = mean
+ self.std = std
+ self.is_scale = is_scale
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ im = im.astype(np.float32, copy=False)
+ mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
+ std = np.array(self.std)[np.newaxis, np.newaxis, :]
+ if self.is_scale:
+ im = im / 255.0
+ im -= mean
+ im /= std
+ return im, im_info
+
+
+class DetPermute(object):
+ """permute image
+ Args:
+ to_bgr (bool): whether convert RGB to BGR
+ channel_first (bool): whether convert HWC to CHW
+ """
+
+ def __init__(self, ):
+ super().__init__()
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+        im = im.transpose((2, 0, 1)).copy()
+ return im, im_info
+
+
+class DetPadStride(object):
+ """ padding image for model with FPN , instead PadBatch(pad_to_stride, pad_gt) in original config
+ Args:
+ stride (bool): model with FPN need image shape % stride == 0
+ """
+
+ def __init__(self, stride=0):
+ self.coarsest_stride = stride
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ coarsest_stride = self.coarsest_stride
+ if coarsest_stride <= 0:
+ return im, im_info
+ im_c, im_h, im_w = im.shape
+ pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
+ pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
+ padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
+ padding_im[:, :im_h, :im_w] = im
+ return padding_im, im_info
+
+
+def det_preprocess(im, im_info, preprocess_ops):
+ for operator in preprocess_ops:
+        im, im_info = operator(im, im_info)
+ return im, im_info
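Putting the operators together, a detector preprocessing pipeline can be assembled by hand as below (imports assume the module is on the path as in predict_det.py; the target size and normalization constants are illustrative, and 'demo.jpg' is a placeholder):

from det_preprocess import (DetResize, DetNormalizeImage, DetPermute,
                            DetPadStride, decode_image, det_preprocess)

ops = [
    DetResize(target_size=[640, 640], keep_ratio=False),
    DetNormalizeImage(mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225]),
    DetPermute(),
    DetPadStride(stride=32),
]
im, im_info = decode_image('demo.jpg', {'input_shape': [3, 640, 640]})
im, im_info = det_preprocess(im, im_info, ops)
print(im.shape)  # CHW, with H and W padded to multiples of 32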
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/postprocess.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/postprocess.py
new file mode 100644
index 000000000..d26cbaa9a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/postprocess.py
@@ -0,0 +1,161 @@
+# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import copy
+import shutil
+from functools import partial
+import importlib
+import numpy as np
+import paddle
+import paddle.nn.functional as F
+
+
+def build_postprocess(config):
+ if config is None:
+ return None
+
+ mod = importlib.import_module(__name__)
+ config = copy.deepcopy(config)
+
+ main_indicator = config.pop(
+ "main_indicator") if "main_indicator" in config else None
+ main_indicator = main_indicator if main_indicator else ""
+
+ func_list = []
+ for func in config:
+ func_list.append(getattr(mod, func)(**config[func]))
+ return PostProcesser(func_list, main_indicator)
+
+
+class PostProcesser(object):
+ def __init__(self, func_list, main_indicator="Topk"):
+ self.func_list = func_list
+ self.main_indicator = main_indicator
+
+ def __call__(self, x, image_file=None):
+ rtn = None
+ for func in self.func_list:
+ tmp = func(x, image_file)
+ if type(func).__name__ in self.main_indicator:
+ rtn = tmp
+ return rtn
+
+
+class Topk(object):
+ def __init__(self, topk=1, class_id_map_file=None):
+ assert isinstance(topk, (int, ))
+ self.class_id_map = self.parse_class_id_map(class_id_map_file)
+ self.topk = topk
+
+ def parse_class_id_map(self, class_id_map_file):
+ if class_id_map_file is None:
+ return None
+
+ if not os.path.exists(class_id_map_file):
+            print(
+                "Warning: if you want to use your own label_dict, please provide a valid path!\nOtherwise label_names will be empty!"
+            )
+ return None
+
+ try:
+ class_id_map = {}
+ with open(class_id_map_file, "r") as fin:
+ lines = fin.readlines()
+ for line in lines:
+ partition = line.split("\n")[0].partition(" ")
+ class_id_map[int(partition[0])] = str(partition[-1])
+ except Exception as ex:
+ print(ex)
+ class_id_map = None
+ return class_id_map
+
+ def __call__(self, x, file_names=None, multilabel=False):
+ if file_names is not None:
+ assert x.shape[0] == len(file_names)
+ y = []
+ for idx, probs in enumerate(x):
+ index = probs.argsort(axis=0)[-self.topk:][::-1].astype(
+ "int32") if not multilabel else np.where(
+ probs >= 0.5)[0].astype("int32")
+ clas_id_list = []
+ score_list = []
+ label_name_list = []
+ for i in index:
+ clas_id_list.append(i.item())
+ score_list.append(probs[i].item())
+ if self.class_id_map is not None:
+ label_name_list.append(self.class_id_map[i.item()])
+ result = {
+ "class_ids": clas_id_list,
+ "scores": np.around(
+ score_list, decimals=5).tolist(),
+ }
+ if file_names is not None:
+ result["file_name"] = file_names[idx]
+ if label_name_list is not None:
+ result["label_names"] = label_name_list
+ y.append(result)
+ return y
+
+
+class MultiLabelTopk(Topk):
+ def __init__(self, topk=1, class_id_map_file=None):
+        super().__init__(topk, class_id_map_file)
+
+ def __call__(self, x, file_names=None):
+ return super().__call__(x, file_names, multilabel=True)
+
+
+class SavePreLabel(object):
+ def __init__(self, save_dir):
+ if save_dir is None:
+            raise Exception(
+                "Please specify save_dir when SavePreLabel is used.")
+ self.save_dir = partial(os.path.join, save_dir)
+
+ def __call__(self, x, file_names=None):
+ if file_names is None:
+ return
+ assert x.shape[0] == len(file_names)
+ for idx, probs in enumerate(x):
+ index = probs.argsort(axis=0)[-1].astype("int32")
+ self.save(index, file_names[idx])
+
+ def save(self, id, image_file):
+ output_dir = self.save_dir(str(id))
+ os.makedirs(output_dir, exist_ok=True)
+ shutil.copy(image_file, output_dir)
+
+
+class Binarize(object):
+ def __init__(self, method="round"):
+ self.method = method
+ self.unit = np.array([[128, 64, 32, 16, 8, 4, 2, 1]]).T
+
+ def __call__(self, x, file_names=None):
+ if self.method == "round":
+ x = np.round(x + 1).astype("uint8") - 1
+
+ if self.method == "sign":
+ x = ((np.sign(x) + 1) / 2).astype("uint8")
+
+ embedding_size = x.shape[1]
+ assert embedding_size % 8 == 0, "The Binary index only support vectors with sizes multiple of 8"
+
+ byte = np.zeros([x.shape[0], embedding_size // 8], dtype=np.uint8)
+ for i in range(embedding_size // 8):
+ byte[:, i:i + 1] = np.dot(x[:, i * 8:(i + 1) * 8], self.unit)
+
+ return byte
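A quick sketch of `Topk` on a fake softmax output (the import path is assumed, matching how this repo imports the module elsewhere; with no label file, `label_names` comes back empty):

import numpy as np
from postprocess import Topk

probs = np.array([[0.05, 0.70, 0.15, 0.10],
                  [0.60, 0.20, 0.15, 0.05]])
topk = Topk(topk=2)
print(topk(probs, file_names=['a.jpg', 'b.jpg']))
# e.g. [{'class_ids': [1, 2], 'scores': [0.7, 0.15],
#        'file_name': 'a.jpg', 'label_names': []}, ...]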
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_cls.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_cls.py
new file mode 100644
index 000000000..cdeb32e48
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_cls.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import cv2
+import numpy as np
+
+from utils import logger
+from utils import config
+from utils.predictor import Predictor
+from utils.get_image_list import get_image_list
+from python.preprocess import create_operators
+from python.postprocess import build_postprocess
+
+
+class ClsPredictor(Predictor):
+ def __init__(self, config):
+ super().__init__(config["Global"])
+
+ self.preprocess_ops = []
+ self.postprocess = None
+ if "PreProcess" in config:
+ if "transform_ops" in config["PreProcess"]:
+ self.preprocess_ops = create_operators(config["PreProcess"][
+ "transform_ops"])
+ if "PostProcess" in config:
+ self.postprocess = build_postprocess(config["PostProcess"])
+
+ # for whole_chain project to test each repo of paddle
+ self.benchmark = config["Global"].get("benchmark", False)
+ if self.benchmark:
+ import auto_log
+ pid = os.getpid()
+ self.auto_logger = auto_log.AutoLogger(
+ model_name=config["Global"].get("model_name", "cls"),
+ model_precision='fp16'
+ if config["Global"]["use_fp16"] else 'fp32',
+ batch_size=config["Global"].get("batch_size", 1),
+ data_shape=[3, 224, 224],
+ save_path=config["Global"].get("save_log_path",
+ "./auto_log.log"),
+ inference_config=self.config,
+ pids=pid,
+ process_name=None,
+ gpu_ids=None,
+ time_keys=[
+ 'preprocess_time', 'inference_time', 'postprocess_time'
+ ],
+ warmup=2)
+
+ def predict(self, images):
+ input_names = self.paddle_predictor.get_input_names()
+ input_tensor = self.paddle_predictor.get_input_handle(input_names[0])
+
+ output_names = self.paddle_predictor.get_output_names()
+ output_tensor = self.paddle_predictor.get_output_handle(output_names[
+ 0])
+ if self.benchmark:
+ self.auto_logger.times.start()
+ if not isinstance(images, (list, )):
+ images = [images]
+ for idx in range(len(images)):
+ for ops in self.preprocess_ops:
+ images[idx] = ops(images[idx])
+ image = np.array(images)
+ if self.benchmark:
+ self.auto_logger.times.stamp()
+
+ input_tensor.copy_from_cpu(image)
+ self.paddle_predictor.run()
+ batch_output = output_tensor.copy_to_cpu()
+ if self.benchmark:
+ self.auto_logger.times.stamp()
+ if self.postprocess is not None:
+ batch_output = self.postprocess(batch_output)
+ if self.benchmark:
+ self.auto_logger.times.end(stamp=True)
+ return batch_output
+
+
+def main(config):
+ cls_predictor = ClsPredictor(config)
+ image_list = get_image_list(config["Global"]["infer_imgs"])
+
+ batch_imgs = []
+ batch_names = []
+ cnt = 0
+ for idx, img_path in enumerate(image_list):
+ img = cv2.imread(img_path)
+ if img is None:
+ logger.warning(
+ "Image file failed to read and has been skipped. The path: {}".
+ format(img_path))
+ else:
+ img = img[:, :, ::-1]
+ batch_imgs.append(img)
+ img_name = os.path.basename(img_path)
+ batch_names.append(img_name)
+ cnt += 1
+
+ if cnt % config["Global"]["batch_size"] == 0 or (idx + 1
+ ) == len(image_list):
+ if len(batch_imgs) == 0:
+ continue
+ batch_results = cls_predictor.predict(batch_imgs)
+ for number, result_dict in enumerate(batch_results):
+ filename = batch_names[number]
+ clas_ids = result_dict["class_ids"]
+ scores_str = "[{}]".format(", ".join("{:.2f}".format(
+ r) for r in result_dict["scores"]))
+ label_names = result_dict["label_names"]
+ print("{}:\tclass id(s): {}, score(s): {}, label_name(s): {}".
+ format(filename, clas_ids, scores_str, label_names))
+ batch_imgs = []
+ batch_names = []
+ if cls_predictor.benchmark:
+ cls_predictor.auto_logger.report()
+ return
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ main(config)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_det.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_det.py
new file mode 100644
index 000000000..0b9c25a5a
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_det.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+from utils import logger
+from utils import config
+from utils.predictor import Predictor
+from utils.get_image_list import get_image_list
+from det_preprocess import det_preprocess
+from preprocess import create_operators
+from utils.draw_bbox import draw_bbox_results
+
+import argparse
+import time
+import yaml
+import ast
+from functools import reduce
+import cv2
+import numpy as np
+import paddle
+import requests
+import base64
+import json
+
+
+class DetPredictor(Predictor):
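+ """Detector built on Paddle Inference; preprocess ops are created from the
+ DetPreProcess.transform_ops section of the config."""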
+ def __init__(self, config):
+ super().__init__(config["Global"],
+ config["Global"]["det_inference_model_dir"])
+
+ self.preprocess_ops = create_operators(config["DetPreProcess"][
+ "transform_ops"])
+ self.config = config
+
+ def preprocess(self, img):
+ # det_preprocess updates im_shape/scale_factor as the ops resize the image
+ im_info = {
+ 'scale_factor': np.array([1., 1.], dtype=np.float32),
+ 'im_shape': np.array(img.shape[:2], dtype=np.float32),
+ 'input_shape': self.config["Global"]["image_shape"],
+ }
+
+ im, im_info = det_preprocess(img, im_info, self.preprocess_ops)
+ inputs = self.create_inputs(im, im_info)
+ return inputs
+
+ def create_inputs(self, im, im_info):
+ """generate input for different model type
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ model_arch (str): model type
+ Returns:
+ inputs (dict): input of model
+ """
+ inputs = {}
+ inputs['image'] = np.array((im, )).astype('float32')
+ inputs['im_shape'] = np.array(
+ (im_info['im_shape'], )).astype('float32')
+ inputs['scale_factor'] = np.array(
+ (im_info['scale_factor'], )).astype('float32')
+ return inputs
+
+ def parse_det_results(self, pred, threshold, label_list):
+ max_det_results = self.config["Global"]["max_det_results"]
+ keep_indexes = pred[:, 1].argsort()[::-1][:max_det_results]
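+ # each pred row is [class_id, score, x_min, y_min, x_max, y_max];
+ # sort by score (column 1) descending and keep at most max_det_results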
+ results = []
+ for idx in keep_indexes:
+ single_res = pred[idx]
+ class_id = int(single_res[0])
+ score = single_res[1]
+ bbox = single_res[2:]
+ if score < threshold:
+ continue
+ label_name = label_list[class_id]
+ # keys follow the recognition-result format consumed by predict_system
+ results.append({
+ "bbox": bbox,
+ "rec_docs": "background",
+ "rec_scores": score,
+ })
+ return results
+
+ def predict(self, image, threshold=0.5, run_benchmark=False):
+ '''
+ Args:
+ image (str/np.ndarray): path of image / np.ndarray read by cv2
+ threshold (float): threshold of the predicted box's score
+ Returns:
+ results (dict): include 'boxes': np.ndarray: shape: [N, 6], N: number of boxes,
+ matrix element: [class, score, x_min, y_min, x_max, y_max]
+ MaskRCNN's results include 'masks': np.ndarray:
+ shape: [N, im_h, im_w]
+ '''
+ inputs = self.preprocess(image)
+ np_boxes = None
+ input_names = self.paddle_predictor.get_input_names()
+ # feed every named input (image, im_shape, scale_factor) before running
+ for i in range(len(input_names)):
+ input_tensor = self.paddle_predictor.get_input_handle(input_names[i])
+ input_tensor.copy_from_cpu(inputs[input_names[i]])
+ t1 = time.time()
+ self.paddle_predictor.run()
+ output_names = self.paddle_predictor.get_output_names()
+ boxes_tensor = self.paddle_predictor.get_output_handle(output_names[0])
+
+ np_boxes = boxes_tensor.copy_to_cpu()
+ t2 = time.time()
+
+ print("Inference: {} ms per batch image".format((t2 - t1) * 1000.0))
+
+ # do not perform postprocess in benchmark mode
+ results = []
+ if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
+ print('[WARNING] No object detected.')
+ results = np.array([])
+ else:
+ results = np_boxes
+
+ results = self.parse_det_results(results,
+ self.config["Global"]["threshold"],
+ self.config["Global"]["labe_list"])
+ return results
+
+
+def main(config):
+ det_predictor = DetPredictor(config)
+ image_list = get_image_list(config["Global"]["infer_imgs"])
+
+ assert config["Global"]["batch_size"] == 1
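+ # the detector is fed one image per predict() call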
+ for idx, image_file in enumerate(image_list):
+ img = cv2.imread(image_file)[:, :, ::-1]
+ output = det_predictor.predict(img)
+ print(output)
+ draw_bbox_results(img, output, image_file)
+
+ return image_file, output
+
+def cv2_to_base64_img(img):
+ data = cv2.imencode('.jpg', img)[1]
+ return base64.b64encode(data.tobytes()).decode('utf8')
+
+def solve_output(output, image_file):
+ img = cv2.imread(image_file)
+
+ for bbox in output:
+ left, top, right, bottom = (int(bbox["bbox"][0]), int(bbox["bbox"][1]),
+ int(bbox["bbox"][2]), int(bbox["bbox"][3]))
+ img_crop = img[top:bottom, left:right]
+ # send each detected crop to the serving endpoint for classification
+ url = "http://123.157.241.94:36807/ppyolo_mbv3/prediction"
+ img2 = {"key": ["image"], "value": [cv2_to_base64_img(img_crop)]}
+ r = requests.post(url=url, data=json.dumps(img2), timeout=5)
+ r = r.json()
+ result = eval(r['value'][0])[0]
+ cv2.putText(img, str(round(float(result["scores"][0]), 2)), (left, top + 30),
+ cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
+ cv2.putText(img, str(result["label_names"][0]), (left, top + 60),
+ cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
+ cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
+ cv2.imwrite("./output/ppyolo_result" + image_file[image_file.rfind("/"):], img)
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ image_file,output = main(config)
+ # solve_output(output, image_file)
+
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_det_bak.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_det_bak.py
new file mode 100644
index 000000000..323d65ab1
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_det_bak.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+from utils import logger
+from utils import config
+from utils.predictor import Predictor
+from utils.get_image_list import get_image_list
+from det_preprocess import det_preprocess
+from preprocess import create_operators
+from utils.draw_bbox import draw_bbox_results
+
+import argparse
+import time
+import yaml
+import ast
+from functools import reduce
+import cv2
+import numpy as np
+import paddle
+
+
+class DetPredictor(Predictor):
+ def __init__(self, config):
+ super().__init__(config["Global"],
+ config["Global"]["det_inference_model_dir"])
+
+ self.preprocess_ops = create_operators(config["DetPreProcess"][
+ "transform_ops"])
+ self.config = config
+
+ def preprocess(self, img):
+ im_info = {
+ 'scale_factor': np.array([1., 1.], dtype=np.float32),
+ 'im_shape': np.array(img.shape[:2], dtype=np.float32),
+ 'input_shape': self.config["Global"]["image_shape"],
+ }
+ im, im_info = det_preprocess(img, im_info, self.preprocess_ops)
+ inputs = self.create_inputs(im, im_info)
+ return inputs
+
+ def create_inputs(self, im, im_info):
+ """generate input for different model type
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ model_arch (str): model type
+ Returns:
+ inputs (dict): input of model
+ """
+ inputs = {}
+ inputs['image'] = np.array((im, )).astype('float32')
+ inputs['im_shape'] = np.array(
+ (im_info['im_shape'], )).astype('float32')
+ inputs['scale_factor'] = np.array(
+ (im_info['scale_factor'], )).astype('float32')
+ return inputs
+
+ def parse_det_results(self, pred, threshold, label_list):
+ max_det_results = self.config["Global"]["max_det_results"]
+ keep_indexes = pred[:, 1].argsort()[::-1][:max_det_results]
+ results = []
+ for idx in keep_indexes:
+ single_res = pred[idx]
+ class_id = int(single_res[0])
+ score = single_res[1]
+ bbox = single_res[2:]
+ if score < threshold:
+ continue
+ label_name = label_list[class_id]
+ # keys follow the recognition-result format consumed by predict_system
+ results.append({
+ "bbox": bbox,
+ "rec_docs": "background",
+ "rec_scores": score,
+ })
+ return results
+
+ def predict(self, image, threshold=0.5, run_benchmark=False):
+ '''
+ Args:
+ image (str/np.ndarray): path of image / np.ndarray read by cv2
+ threshold (float): threshold of the predicted box's score
+ Returns:
+ results (dict): include 'boxes': np.ndarray: shape: [N, 6], N: number of boxes,
+ matrix element: [class, score, x_min, y_min, x_max, y_max]
+ MaskRCNN's results include 'masks': np.ndarray:
+ shape: [N, im_h, im_w]
+ '''
+ inputs = self.preprocess(image)
+ np_boxes = None
+ input_names = self.paddle_predictor.get_input_names()
+
+ for i in range(len(input_names)):
+ input_tensor = self.paddle_predictor.get_input_handle(input_names[i])
+ input_tensor.copy_from_cpu(inputs[input_names[i]])
+
+ t1 = time.time()
+ self.paddle_predictor.run()
+ output_names = self.paddle_predictor.get_output_names()
+ boxes_tensor = self.paddle_predictor.get_output_handle(output_names[0])
+ np_boxes = boxes_tensor.copy_to_cpu()
+ t2 = time.time()
+
+ print("Inference: {} ms per batch image".format((t2 - t1) * 1000.0))
+
+ # do not perform postprocess in benchmark mode
+ results = []
+ if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
+ print('[WARNING] No object detected.')
+ results = np.array([])
+ else:
+ results = np_boxes
+
+ results = self.parse_det_results(results,
+ self.config["Global"]["threshold"],
+ self.config["Global"]["labe_list"])
+ return results
+
+
+def main(config):
+ det_predictor = DetPredictor(config)
+ image_list = get_image_list(config["Global"]["infer_imgs"])
+
+ assert config["Global"]["batch_size"] == 1
+ for idx, image_file in enumerate(image_list):
+ img = cv2.imread(image_file)[:, :, ::-1]
+ output = det_predictor.predict(img)
+ print(output)
+ draw_bbox_results(img, output, image_file)
+
+ return
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ main(config)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_rec.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_rec.py
new file mode 100644
index 000000000..d41c513f8
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_rec.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import cv2
+import numpy as np
+
+from utils import logger
+from utils import config
+from utils.predictor import Predictor
+from utils.get_image_list import get_image_list
+from preprocess import create_operators
+from postprocess import build_postprocess
+
+
+class RecPredictor(Predictor):
+ def __init__(self, config):
+ super().__init__(config["Global"],
+ config["Global"]["rec_inference_model_dir"])
+ self.preprocess_ops = create_operators(config["RecPreProcess"][
+ "transform_ops"])
+ self.postprocess = build_postprocess(config["RecPostProcess"])
+
+ def predict(self, images, feature_normalize=True):
+ input_names = self.paddle_predictor.get_input_names()
+ input_tensor = self.paddle_predictor.get_input_handle(input_names[0])
+
+ output_names = self.paddle_predictor.get_output_names()
+ output_tensor = self.paddle_predictor.get_output_handle(output_names[0])
+
+ if not isinstance(images, (list, )):
+ images = [images]
+ for idx in range(len(images)):
+ for ops in self.preprocess_ops:
+ images[idx] = ops(images[idx])
+ image = np.array(images)
+
+ input_tensor.copy_from_cpu(image)
+ self.paddle_predictor.run()
+ batch_output = output_tensor.copy_to_cpu()
+
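+ # L2-normalize each feature vector so downstream retrieval scores
+ # behave like cosine similarities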
+ if feature_normalize:
+ feas_norm = np.sqrt(
+ np.sum(np.square(batch_output), axis=1, keepdims=True))
+ batch_output = np.divide(batch_output, feas_norm)
+
+ if self.postprocess is not None:
+ batch_output = self.postprocess(batch_output)
+ return batch_output
+
+
+def main(config):
+ rec_predictor = RecPredictor(config)
+ image_list = get_image_list(config["Global"]["infer_imgs"])
+
+ batch_imgs = []
+ batch_names = []
+ cnt = 0
+ for idx, img_path in enumerate(image_list):
+ img = cv2.imread(img_path)
+ if img is None:
+ logger.warning(
+ "Failed to read the image file, skipping it. Path: {}".format(
+ img_path))
+ else:
+ img = img[:, :, ::-1]
+ batch_imgs.append(img)
+ img_name = os.path.basename(img_path)
+ batch_names.append(img_name)
+ cnt += 1
+
+ if cnt % config["Global"]["batch_size"] == 0 or (idx + 1) == len(image_list):
+ if len(batch_imgs) == 0:
+ continue
+
+ batch_results = rec_predictor.predict(batch_imgs)
+ for number, result_dict in enumerate(batch_results):
+ filename = batch_names[number]
+ print("{}:\t{}".format(filename, result_dict))
+ batch_imgs = []
+ batch_names = []
+
+ return
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ main(config)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_system.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_system.py
new file mode 100644
index 000000000..fb2d66a53
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/predict_system.py
@@ -0,0 +1,145 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import copy
+import cv2
+import numpy as np
+import faiss
+import pickle
+
+from python.predict_rec import RecPredictor
+from python.predict_det import DetPredictor
+
+from utils import logger
+from utils import config
+from utils.get_image_list import get_image_list
+from utils.draw_bbox import draw_bbox_results
+
+
+class SystemPredictor(object):
+ def __init__(self, config):
+
+ self.config = config
+ self.rec_predictor = RecPredictor(config)
+ self.det_predictor = DetPredictor(config)
+
+ assert 'IndexProcess' in config.keys(), "Index config not found ... "
+ self.return_k = self.config['IndexProcess']['return_k']
+
+ index_dir = self.config["IndexProcess"]["index_dir"]
+ assert os.path.exists(os.path.join(
+ index_dir, "vector.index")), "vector.index not found ..."
+ assert os.path.exists(os.path.join(
+ index_dir, "id_map.pkl")), "id_map.pkl not found ... "
+
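+ # load the faiss index (binary variant if configured) together with the
+ # pickled id -> label mapping, both presumably built by an offline
+ # gallery-indexing step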
+ if config['IndexProcess'].get("binary_index", False):
+ self.Searcher = faiss.read_index_binary(
+ os.path.join(index_dir, "vector.index"))
+ else:
+ self.Searcher = faiss.read_index(
+ os.path.join(index_dir, "vector.index"))
+
+ with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd:
+ self.id_map = pickle.load(fd)
+
+ def append_self(self, results, shape):
+ results.append({
+ "class_id": 0,
+ "score": 1.0,
+ "bbox":
+ np.array([0, 0, shape[1], shape[0]]), # xmin, ymin, xmax, ymax
+ "label_name": "foreground",
+ })
+ return results
+
+ def nms_to_rec_results(self, results, thresh=0.1):
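+ # classic greedy NMS: repeatedly keep the highest-scoring box and drop
+ # boxes whose IoU with it exceeds thresh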
+ filtered_results = []
+ x1 = np.array([r["bbox"][0] for r in results]).astype("float32")
+ y1 = np.array([r["bbox"][1] for r in results]).astype("float32")
+ x2 = np.array([r["bbox"][2] for r in results]).astype("float32")
+ y2 = np.array([r["bbox"][3] for r in results]).astype("float32")
+ scores = np.array([r["rec_scores"] for r in results])
+
+ areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+ order = scores.argsort()[::-1]
+ while order.size > 0:
+ i = order[0]
+ xx1 = np.maximum(x1[i], x1[order[1:]])
+ yy1 = np.maximum(y1[i], y1[order[1:]])
+ xx2 = np.minimum(x2[i], x2[order[1:]])
+ yy2 = np.minimum(y2[i], y2[order[1:]])
+
+ w = np.maximum(0.0, xx2 - xx1 + 1)
+ h = np.maximum(0.0, yy2 - yy1 + 1)
+ inter = w * h
+ ovr = inter / (areas[i] + areas[order[1:]] - inter)
+ inds = np.where(ovr <= thresh)[0]
+ order = order[inds + 1]
+ filtered_results.append(results[i])
+
+ return filtered_results
+
+ def predict(self, img):
+ output = []
+ # st1: get all detection results
+ results = self.det_predictor.predict(img)
+
+ # st2: add the whole image for recognition to improve recall
+ results = self.append_self(results, img.shape)
+
+ # st3: recognition process, use score_thres to ensure accuracy
+ for result in results:
+ preds = {}
+ xmin, ymin, xmax, ymax = result["bbox"].astype("int")
+ crop_img = img[ymin:ymax, xmin:xmax, :].copy()
+ rec_results = self.rec_predictor.predict(crop_img)
+ preds["bbox"] = [xmin, ymin, xmax, ymax]
+ scores, docs = self.Searcher.search(rec_results, self.return_k)
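+ # faiss returns the top return_k (score, index) pairs; indices map back
+ # to label strings through id_map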
+
+ # only the top-1 retrieval result is kept for the final output
+ if scores[0][0] >= self.config["IndexProcess"]["score_thres"]:
+ preds["rec_docs"] = self.id_map[docs[0][0]].split()[1]
+ preds["rec_scores"] = scores[0][0]
+ output.append(preds)
+
+ # st4: apply NMS to the final results to avoid duplicate boxes
+ output = self.nms_to_rec_results(
+ output, self.config["Global"]["rec_nms_thresold"])
+
+ return output
+
+
+def main(config):
+ system_predictor = SystemPredictor(config)
+ image_list = get_image_list(config["Global"]["infer_imgs"])
+
+ assert config["Global"]["batch_size"] == 1
+ for idx, image_file in enumerate(image_list):
+ img = cv2.imread(image_file)[:, :, ::-1]
+ output = system_predictor.predict(img)
+ print(image_file)
+ draw_bbox_results(img, output, image_file)
+ print(output)
+ return
+
+
+if __name__ == "__main__":
+ args = config.parse_args()
+ config = config.get_config(args.config, overrides=args.override, show=True)
+ main(config)
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/preprocess.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/preprocess.py
new file mode 100644
index 000000000..1da32ad6e
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/python/preprocess.py
@@ -0,0 +1,337 @@
+"""
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from functools import partial
+import six
+import math
+import random
+import cv2
+import numpy as np
+import importlib
+from PIL import Image
+
+# logger is used by UnifiedResize when an unknown backend is requested
+from utils import logger
+from python.det_preprocess import DetNormalizeImage, DetPadStride, DetPermute, DetResize
+
+
+def create_operators(params):
+ """
+ create operators based on the config
+
+ Args:
+ params(list): a dict list, used to create some operators
+ """
+ assert isinstance(params, list), ('operator config should be a list')
+ mod = importlib.import_module(__name__)
+ ops = []
+ for operator in params:
+ assert isinstance(operator,
+ dict) and len(operator) == 1, "yaml format error"
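+ # each entry must be a single-key dict: {OperatorClassName: kwargs or None}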
+ op_name = list(operator)[0]
+ param = {} if operator[op_name] is None else operator[op_name]
+ op = getattr(mod, op_name)(**param)
+ ops.append(op)
+
+ return ops
+
+
+class UnifiedResize(object):
+ def __init__(self, interpolation=None, backend="cv2"):
+ _cv2_interp_from_str = {
+ 'nearest': cv2.INTER_NEAREST,
+ 'bilinear': cv2.INTER_LINEAR,
+ 'area': cv2.INTER_AREA,
+ 'bicubic': cv2.INTER_CUBIC,
+ 'lanczos': cv2.INTER_LANCZOS4
+ }
+ _pil_interp_from_str = {
+ 'nearest': Image.NEAREST,
+ 'bilinear': Image.BILINEAR,
+ 'bicubic': Image.BICUBIC,
+ 'box': Image.BOX,
+ 'lanczos': Image.LANCZOS,
+ 'hamming': Image.HAMMING
+ }
+
+ def _pil_resize(src, size, resample):
+ pil_img = Image.fromarray(src)
+ pil_img = pil_img.resize(size, resample)
+ return np.asarray(pil_img)
+
+ if backend.lower() == "cv2":
+ if isinstance(interpolation, str):
+ interpolation = _cv2_interp_from_str[interpolation.lower()]
+ # compatible with opencv < version 4.4.0
+ elif interpolation is None:
+ interpolation = cv2.INTER_LINEAR
+ self.resize_func = partial(cv2.resize, interpolation=interpolation)
+ elif backend.lower() == "pil":
+ if isinstance(interpolation, str):
+ interpolation = _pil_interp_from_str[interpolation.lower()]
+ self.resize_func = partial(_pil_resize, resample=interpolation)
+ else:
+ logger.warning(
+ f"The backend of Resize only support \"cv2\" or \"PIL\". \"f{backend}\" is unavailable. Use \"cv2\" instead."
+ )
+ self.resize_func = cv2.resize
+
+ def __call__(self, src, size):
+ return self.resize_func(src, size)
+
+
+class OperatorParamError(ValueError):
+ """ OperatorParamError
+ """
+ pass
+
+
+class DecodeImage(object):
+ """ decode image """
+
+ def __init__(self, to_rgb=True, to_np=False, channel_first=False):
+ self.to_rgb = to_rgb
+ self.to_np = to_np # to numpy
+ self.channel_first = channel_first # only enabled when to_np is True
+
+ def __call__(self, img):
+ if six.PY2:
+ assert type(img) is str and len(
+ img) > 0, "invalid input 'img' in DecodeImage"
+ else:
+ assert type(img) is bytes and len(
+ img) > 0, "invalid input 'img' in DecodeImage"
+ data = np.frombuffer(img, dtype='uint8')
+ img = cv2.imdecode(data, 1)
+ if self.to_rgb:
+ assert img.shape[2] == 3, 'invalid shape of image[%s]' % (
+ img.shape)
+ img = img[:, :, ::-1]
+
+ if self.channel_first:
+ img = img.transpose((2, 0, 1))
+
+ return img
+
+
+class ResizeImage(object):
+ """ resize image """
+
+ def __init__(self,
+ size=None,
+ resize_short=None,
+ interpolation=None,
+ backend="cv2"):
+ if resize_short is not None and resize_short > 0:
+ self.resize_short = resize_short
+ self.w = None
+ self.h = None
+ elif size is not None:
+ self.resize_short = None
+ self.w = size if type(size) is int else size[0]
+ self.h = size if type(size) is int else size[1]
+ else:
+ raise OperatorParamError("invalid params for ResizeImage: "
+ "both 'size' and 'resize_short' are None")
+
+ self._resize_func = UnifiedResize(
+ interpolation=interpolation, backend=backend)
+
+ def __call__(self, img):
+ img_h, img_w = img.shape[:2]
+ if self.resize_short is not None:
+ percent = float(self.resize_short) / min(img_w, img_h)
+ w = int(round(img_w * percent))
+ h = int(round(img_h * percent))
+ else:
+ w = self.w
+ h = self.h
+ return self._resize_func(img, (w, h))
+
+
+class CropImage(object):
+ """ crop image """
+
+ def __init__(self, size):
+ if type(size) is int:
+ self.size = (size, size)
+ else:
+ self.size = size # (h, w)
+
+ def __call__(self, img):
+ w, h = self.size
+ img_h, img_w = img.shape[:2]
+
+ if img_h < h or img_w < w:
+ raise Exception(
+ f"The size({h}, {w}) of CropImage must be greater than size({img_h}, {img_w}) of image. Please check image original size and size of ResizeImage if used."
+ )
+
+ w_start = (img_w - w) // 2
+ h_start = (img_h - h) // 2
+
+ w_end = w_start + w
+ h_end = h_start + h
+ return img[h_start:h_end, w_start:w_end, :]
+
+
+class RandCropImage(object):
+ """ random crop image """
+
+ def __init__(self,
+ size,
+ scale=None,
+ ratio=None,
+ interpolation=None,
+ backend="cv2"):
+ if type(size) is int:
+ self.size = (size, size) # (h, w)
+ else:
+ self.size = size
+
+ self.scale = [0.08, 1.0] if scale is None else scale
+ self.ratio = [3. / 4., 4. / 3.] if ratio is None else ratio
+
+ self._resize_func = UnifiedResize(
+ interpolation=interpolation, backend=backend)
+
+ def __call__(self, img):
+ size = self.size
+ scale = self.scale
+ ratio = self.ratio
+
+ aspect_ratio = math.sqrt(random.uniform(*ratio))
+ w = 1. * aspect_ratio
+ h = 1. / aspect_ratio
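+ # sample an aspect ratio, bound the area scale so the crop fits inside
+ # the image, then convert the sampled area into integer (w, h)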
+
+ img_h, img_w = img.shape[:2]
+
+ bound = min((float(img_w) / img_h) / (w**2),
+ (float(img_h) / img_w) / (h**2))
+ scale_max = min(scale[1], bound)
+ scale_min = min(scale[0], bound)
+
+ target_area = img_w * img_h * random.uniform(scale_min, scale_max)
+ target_size = math.sqrt(target_area)
+ w = int(target_size * w)
+ h = int(target_size * h)
+
+ i = random.randint(0, img_w - w)
+ j = random.randint(0, img_h - h)
+
+ img = img[j:j + h, i:i + w, :]
+
+ return self._resize_func(img, size)
+
+
+class RandFlipImage(object):
+ """ random flip image
+ flip_code:
+ 1: Flipped Horizontally
+ 0: Flipped Vertically
+ -1: Flipped Horizontally & Vertically
+ """
+
+ def __init__(self, flip_code=1):
+ assert flip_code in [-1, 0, 1
+ ], "flip_code should be a value in [-1, 0, 1]"
+ self.flip_code = flip_code
+
+ def __call__(self, img):
+ if random.randint(0, 1) == 1:
+ return cv2.flip(img, self.flip_code)
+ else:
+ return img
+
+
+class AutoAugment(object):
+ def __init__(self):
+ # ImageNetPolicy is assumed to come from an auto-augment module
+ # (e.g. PaddleClas's autoaugment ops); it is not defined in this file
+ self.policy = ImageNetPolicy()
+
+ def __call__(self, img):
+ from PIL import Image
+ img = np.ascontiguousarray(img)
+ img = Image.fromarray(img)
+ img = self.policy(img)
+ img = np.asarray(img)
+ return img
+
+
+class NormalizeImage(object):
+ """ normalize image such as substract mean, divide std
+ """
+
+ def __init__(self,
+ scale=None,
+ mean=None,
+ std=None,
+ order='chw',
+ output_fp16=False,
+ channel_num=3):
+ if isinstance(scale, str):
+ scale = eval(scale)
+ assert channel_num in [
+ 3, 4
+ ], "channel number of input image should be set to 3 or 4."
+ self.channel_num = channel_num
+ self.output_dtype = 'float16' if output_fp16 else 'float32'
+ self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
+ self.order = order
+ mean = mean if mean is not None else [0.485, 0.456, 0.406]
+ std = std if std is not None else [0.229, 0.224, 0.225]
+
+ shape = (3, 1, 1) if self.order == 'chw' else (1, 1, 3)
+ self.mean = np.array(mean).reshape(shape).astype('float32')
+ self.std = np.array(std).reshape(shape).astype('float32')
+
+ def __call__(self, img):
+ from PIL import Image
+ if isinstance(img, Image.Image):
+ img = np.array(img)
+
+ assert isinstance(img,
+ np.ndarray), "invalid input 'img' in NormalizeImage"
+
+ img = (img.astype('float32') * self.scale - self.mean) / self.std
+
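+ # optionally append a zero-filled 4th channel for models exported
+ # with 4-channel input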
+ if self.channel_num == 4:
+ img_h = img.shape[1] if self.order == 'chw' else img.shape[0]
+ img_w = img.shape[2] if self.order == 'chw' else img.shape[1]
+ pad_zeros = np.zeros(
+ (1, img_h, img_w)) if self.order == 'chw' else np.zeros(
+ (img_h, img_w, 1))
+ img = (np.concatenate(
+ (img, pad_zeros), axis=0)
+ if self.order == 'chw' else np.concatenate(
+ (img, pad_zeros), axis=2))
+ return img.astype(self.output_dtype)
+
+
+class ToCHWImage(object):
+ """ convert hwc image to chw image
+ """
+
+ def __init__(self):
+ pass
+
+ def __call__(self, img):
+ from PIL import Image
+ if isinstance(img, Image.Image):
+ img = np.array(img)
+
+ return img.transpose((2, 0, 1))
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/bbox.json b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/bbox.json
new file mode 100644
index 000000000..45b783bdd
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/bbox.json
@@ -0,0 +1 @@
+[{"image_id": 0, "category_id": 1, "bbox": [369.21038818359375, 363.6772766113281, 449.0653076171875, 618.1871643066406], "score": 0.7658376693725586}, {"image_id": 0, "category_id": 1, "bbox": [670.894775390625, 238.51173400878906, 296.062744140625, 546.7630462646484], "score": 0.6147813200950623}, {"image_id": 0, "category_id": 1, "bbox": [664.9336547851562, 337.8657531738281, 323.69287109375, 652.8916320800781], "score": 0.5161063075065613}, {"image_id": 0, "category_id": 1, "bbox": [591.9150390625, 119.05738067626953, 382.39044189453125, 587.3450241088867], "score": 0.3062722086906433}, {"image_id": 0, "category_id": 1, "bbox": [694.8301391601562, 260.4667663574219, 420.40826416015625, 624.0963439941406], "score": 0.23929257690906525}, {"image_id": 0, "category_id": 1, "bbox": [192.49095153808594, 44.44557571411133, 359.98658752441406, 603.1478691101074], "score": 0.15440843999385834}, {"image_id": 0, "category_id": 1, "bbox": [689.6312866210938, 122.9439926147461, 429.17864990234375, 588.3627090454102], "score": 0.1302269697189331}, {"image_id": 0, "category_id": 1, "bbox": [834.6945190429688, 21.451045989990234, 850.5206909179688, 952.0460243225098], "score": 0.09691941738128662}, {"image_id": 0, "category_id": 1, "bbox": [229.75222778320312, 131.3287811279297, 904.2850036621094, 925.1104278564453], "score": 0.08249522000551224}, {"image_id": 0, "category_id": 1, "bbox": [314.11334228515625, 6.857889175415039, 422.2564697265625, 579.4266567230225], "score": 0.07065446674823761}, {"image_id": 0, "category_id": 1, "bbox": [715.101806640625, 417.32574462890625, 397.4493408203125, 605.2271728515625], "score": 0.06695712357759476}, {"image_id": 0, "category_id": 1, "bbox": [49.746551513671875, 37.481727600097656, 877.1223449707031, 925.4673080444336], "score": 0.06509523093700409}, {"image_id": 0, "category_id": 1, "bbox": [348.88494873046875, 227.6735076904297, 443.2528076171875, 602.1720123291016], "score": 0.06018674373626709}, {"image_id": 0, "category_id": 1, "bbox": [257.8827209472656, 169.61471557617188, 405.0467834472656, 583.5134582519531], "score": 0.05990540236234665}, {"image_id": 0, "category_id": 1, "bbox": [577.4286499023438, 142.23439025878906, 996.9476928710938, 906.7934417724609], "score": 0.05525392293930054}, {"image_id": 0, "category_id": 1, "bbox": [1231.872802734375, 243.7411651611328, 376.67919921875, 659.8349456787109], "score": 0.04920388385653496}, {"image_id": 0, "category_id": 1, "bbox": [338.7894287109375, 32.709835052490234, 1035.148193359375, 919.6828651428223], "score": 0.04713256284594536}, {"image_id": 0, "category_id": 1, "bbox": [802.6375732421875, 308.743896484375, 432.4476318359375, 621.3531494140625], "score": 0.04656871780753136}, {"image_id": 0, "category_id": 1, "bbox": [152.52699279785156, 475.0992736816406, 1061.3697357177734, 604.9007263183594], "score": 0.045581601560115814}, {"image_id": 0, "category_id": 1, "bbox": [0.0, 244.21304321289062, 778.9253540039062, 835.7869567871094], "score": 0.04554731026291847}, {"image_id": 0, "category_id": 1, "bbox": [211.4132537841797, 0.0, 419.17298889160156, 476.0053405761719], "score": 0.042011335492134094}, {"image_id": 0, "category_id": 1, "bbox": [1016.902587890625, 147.04959106445312, 820.3148193359375, 893.8269958496094], "score": 0.04177388921380043}, {"image_id": 0, "category_id": 1, "bbox": [565.164794921875, 272.8253173828125, 367.65081787109375, 707.5872802734375], "score": 0.04128308221697807}, {"image_id": 0, "category_id": 1, "bbox": [1671.774658203125, 0.0, 248.225341796875, 
163.10865783691406], "score": 0.03875980153679848}, {"image_id": 0, "category_id": 1, "bbox": [0.0, 421.52142333984375, 409.654296875, 628.8292846679688], "score": 0.038403138518333435}, {"image_id": 0, "category_id": 2, "bbox": [469.33697509765625, 663.2904663085938, 572.0955200195312, 416.70953369140625], "score": 0.33230018615722656}, {"image_id": 0, "category_id": 2, "bbox": [376.9255065917969, 723.4679565429688, 524.7114562988281, 356.53204345703125], "score": 0.2078298032283783}, {"image_id": 0, "category_id": 2, "bbox": [699.5205688476562, 669.8173217773438, 449.72991943359375, 410.18267822265625], "score": 0.18662796914577484}, {"image_id": 0, "category_id": 2, "bbox": [598.782958984375, 773.4874877929688, 513.5545654296875, 306.51251220703125], "score": 0.13477538526058197}, {"image_id": 0, "category_id": 2, "bbox": [356.8109130859375, 595.3539428710938, 435.45257568359375, 484.64605712890625], "score": 0.11906614154577255}, {"image_id": 0, "category_id": 2, "bbox": [369.21038818359375, 363.6772766113281, 449.0653076171875, 618.1871643066406], "score": 0.0925934687256813}, {"image_id": 0, "category_id": 2, "bbox": [604.5804443359375, 551.287841796875, 476.4017333984375, 528.712158203125], "score": 0.09187335520982742}, {"image_id": 0, "category_id": 2, "bbox": [675.1460571289062, 190.14633178710938, 287.07452392578125, 549.7132263183594], "score": 0.07040248066186905}, {"image_id": 0, "category_id": 2, "bbox": [885.0928344726562, 675.1228637695312, 450.03546142578125, 404.87713623046875], "score": 0.06983193010091782}, {"image_id": 0, "category_id": 2, "bbox": [800.8128662109375, 554.4234619140625, 421.53466796875, 525.5765380859375], "score": 0.06722953915596008}, {"image_id": 0, "category_id": 2, "bbox": [664.9336547851562, 337.8657531738281, 323.69287109375, 652.8916320800781], "score": 0.06181788444519043}, {"image_id": 0, "category_id": 2, "bbox": [240.40045166015625, 700.2440185546875, 485.84588623046875, 379.7559814453125], "score": 0.061111561954021454}, {"image_id": 0, "category_id": 2, "bbox": [788.89990234375, 783.9036865234375, 421.3397216796875, 296.0963134765625], "score": 0.055838700383901596}, {"image_id": 0, "category_id": 2, "bbox": [985.13818359375, 618.0466918945312, 443.4892578125, 461.95330810546875], "score": 0.055766522884368896}, {"image_id": 0, "category_id": 2, "bbox": [694.8301391601562, 260.4667663574219, 420.40826416015625, 624.0963439941406], "score": 0.051908161491155624}, {"image_id": 0, "category_id": 2, "bbox": [715.101806640625, 417.32574462890625, 397.4493408203125, 605.2271728515625], "score": 0.0512847863137722}, {"image_id": 0, "category_id": 2, "bbox": [803.2897338867188, 367.2182312011719, 432.16656494140625, 607.6370544433594], "score": 0.05090872570872307}, {"image_id": 0, "category_id": 2, "bbox": [286.189453125, 250.7058563232422, 418.119384765625, 674.7430572509766], "score": 0.04995275288820267}, {"image_id": 0, "category_id": 2, "bbox": [1231.872802734375, 243.7411651611328, 376.67919921875, 659.8349456787109], "score": 0.049444179981946945}, {"image_id": 0, "category_id": 2, "bbox": [1103.7071533203125, 296.7301940917969, 446.940185546875, 650.3741149902344], "score": 0.04784170538187027}, {"image_id": 0, "category_id": 2, "bbox": [7.775665283203125, 338.7440490722656, 989.5711364746094, 741.2559509277344], "score": 0.04766125604510307}, {"image_id": 0, "category_id": 2, "bbox": [689.6312866210938, 122.9439926147461, 429.17864990234375, 588.3627090454102], "score": 0.04750380665063858}, {"image_id": 0, "category_id": 2, "bbox": 
[905.9383544921875, 436.5836181640625, 396.6561279296875, 579.4443359375], "score": 0.045474935322999954}, {"image_id": 0, "category_id": 2, "bbox": [195.19969177246094, 0.0, 346.26612854003906, 519.9613647460938], "score": 0.04530280828475952}, {"image_id": 0, "category_id": 2, "bbox": [152.52699279785156, 475.0992736816406, 1061.3697357177734, 604.9007263183594], "score": 0.04524225741624832}, {"image_id": 0, "category_id": 2, "bbox": [519.4625244140625, 421.770751953125, 389.2327880859375, 658.229248046875], "score": 0.044872868806123734}, {"image_id": 0, "category_id": 2, "bbox": [316.914794921875, 0.0, 413.89373779296875, 538.5654907226562], "score": 0.0442454032599926}, {"image_id": 0, "category_id": 2, "bbox": [92.4642562866211, 647.5697021484375, 531.0228652954102, 432.4302978515625], "score": 0.044114261865615845}, {"image_id": 0, "category_id": 2, "bbox": [1076.1507568359375, 670.0338745117188, 479.9395751953125, 409.96612548828125], "score": 0.043668314814567566}, {"image_id": 0, "category_id": 2, "bbox": [978.5048828125, 0.0, 491.409423828125, 482.615478515625], "score": 0.042377494275569916}, {"image_id": 0, "category_id": 2, "bbox": [1221.3729248046875, 100.51383209228516, 374.6724853515625, 621.4558334350586], "score": 0.04213497042655945}, {"image_id": 0, "category_id": 2, "bbox": [191.49087524414062, 110.94390869140625, 349.5939636230469, 573.9314575195312], "score": 0.04188292473554611}, {"image_id": 0, "category_id": 2, "bbox": [1307.297607421875, 362.5501708984375, 339.1165771484375, 617.7063598632812], "score": 0.04179665818810463}, {"image_id": 0, "category_id": 2, "bbox": [565.164794921875, 272.8253173828125, 367.65081787109375, 707.5872802734375], "score": 0.04140790179371834}, {"image_id": 0, "category_id": 2, "bbox": [578.2806396484375, 53.80111312866211, 372.37841796875, 623.0383644104004], "score": 0.041312552988529205}, {"image_id": 0, "category_id": 2, "bbox": [1299.0963134765625, 164.76296997070312, 356.0087890625, 603.7886047363281], "score": 0.041034385561943054}, {"image_id": 0, "category_id": 2, "bbox": [656.9183349609375, 0.0, 357.2532958984375, 613.007568359375], "score": 0.04052584618330002}, {"image_id": 0, "category_id": 2, "bbox": [332.3094177246094, 170.0246124267578, 433.6133117675781, 585.7411956787109], "score": 0.04020491987466812}, {"image_id": 0, "category_id": 2, "bbox": [1229.7386474609375, 479.34918212890625, 372.51513671875, 586.5774536132812], "score": 0.03964076563715935}, {"image_id": 0, "category_id": 2, "bbox": [0.0, 129.17469787597656, 605.098876953125, 928.5396575927734], "score": 0.03956187516450882}, {"image_id": 0, "category_id": 2, "bbox": [825.9295043945312, 0.0, 414.74920654296875, 483.2961120605469], "score": 0.03814307600259781}, {"image_id": 0, "category_id": 2, "bbox": [895.5785522460938, 211.61929321289062, 412.99212646484375, 609.5415344238281], "score": 0.03801831230521202}, {"image_id": 0, "category_id": 2, "bbox": [515.4039916992188, 345.1026611328125, 1023.2372436523438, 734.8973388671875], "score": 0.037966832518577576}, {"image_id": 0, "category_id": 2, "bbox": [229.75222778320312, 131.3287811279297, 904.2850036621094, 925.1104278564453], "score": 0.03796369582414627}, {"image_id": 0, "category_id": 2, "bbox": [1625.2398681640625, 0.0, 294.7601318359375, 194.82472229003906], "score": 0.03783120587468147}, {"image_id": 0, "category_id": 2, "bbox": [791.4188842773438, 116.92776489257812, 439.65631103515625, 595.1671447753906], "score": 0.037570975720882416}, {"image_id": 0, "category_id": 2, "bbox": 
[1296.676513671875, 603.2489013671875, 453.5799560546875, 476.7510986328125], "score": 0.03743072599172592}, {"image_id": 0, "category_id": 2, "bbox": [1110.3851318359375, 153.0233612060547, 400.5989990234375, 617.6845855712891], "score": 0.0368758924305439}, {"image_id": 0, "category_id": 3, "bbox": [667.9281005859375, 122.06419372558594, 307.7900390625, 583.3257598876953], "score": 0.1632118672132492}, {"image_id": 0, "category_id": 3, "bbox": [664.9336547851562, 337.8657531738281, 323.69287109375, 652.8916320800781], "score": 0.14787925779819489}, {"image_id": 0, "category_id": 3, "bbox": [405.5592041015625, 366.2292175292969, 418.45135498046875, 613.0199890136719], "score": 0.11382444947957993}, {"image_id": 0, "category_id": 3, "bbox": [192.49095153808594, 44.44557571411133, 359.98658752441406, 603.1478691101074], "score": 0.07995932549238205}, {"image_id": 0, "category_id": 3, "bbox": [355.2989501953125, 477.7073669433594, 955.333740234375, 602.2926330566406], "score": 0.07945643365383148}, {"image_id": 0, "category_id": 3, "bbox": [603.8470458984375, 235.46315002441406, 365.01470947265625, 560.6461029052734], "score": 0.07423040270805359}, {"image_id": 0, "category_id": 3, "bbox": [235.5406951904297, 243.9565887451172, 923.9011993408203, 836.0434112548828], "score": 0.06240400671958923}, {"image_id": 0, "category_id": 3, "bbox": [142.617919921875, 603.7230834960938, 1060.0936279296875, 476.27691650390625], "score": 0.05449732393026352}, {"image_id": 0, "category_id": 3, "bbox": [715.101806640625, 417.32574462890625, 397.4493408203125, 605.2271728515625], "score": 0.05061645433306694}, {"image_id": 0, "category_id": 3, "bbox": [1299.212158203125, 247.1024169921875, 347.8077392578125, 648.6658935546875], "score": 0.04928262531757355}, {"image_id": 0, "category_id": 3, "bbox": [696.3334350585938, 273.67926025390625, 311.653564453125, 281.5560302734375], "score": 0.0483444482088089}, {"image_id": 0, "category_id": 3, "bbox": [695.0177001953125, 191.2881317138672, 404.6990966796875, 547.9367828369141], "score": 0.04781320318579674}, {"image_id": 0, "category_id": 3, "bbox": [515.4039916992188, 345.1026611328125, 1023.2372436523438, 734.8973388671875], "score": 0.04683841019868851}, {"image_id": 0, "category_id": 3, "bbox": [817.5917358398438, 766.8787231445312, 321.89239501953125, 273.73345947265625], "score": 0.04546593502163887}, {"image_id": 0, "category_id": 3, "bbox": [1227.412353515625, 357.1609191894531, 383.4111328125, 630.0343933105469], "score": 0.04503702372312546}, {"image_id": 0, "category_id": 3, "bbox": [641.516845703125, 303.2068786621094, 312.52618408203125, 277.1446838378906], "score": 0.04236442595720291}, {"image_id": 0, "category_id": 3, "bbox": [834.6945190429688, 21.451045989990234, 850.5206909179688, 952.0460243225098], "score": 0.040551796555519104}, {"image_id": 0, "category_id": 3, "bbox": [807.8016967773438, 498.6989440917969, 416.64410400390625, 568.8255920410156], "score": 0.03955165669322014}, {"image_id": 0, "category_id": 3, "bbox": [1673.1683349609375, 0.0, 246.8316650390625, 183.63592529296875], "score": 0.03916015103459358}, {"image_id": 0, "category_id": 3, "bbox": [676.8989868164062, 665.8003540039062, 315.14105224609375, 255.6705322265625], "score": 0.0386299192905426}, {"image_id": 0, "category_id": 3, "bbox": [771.0846557617188, 664.4556884765625, 318.89813232421875, 256.37261962890625], "score": 0.03843867406249046}, {"image_id": 0, "category_id": 3, "bbox": [337.8062438964844, 826.3405151367188, 329.0603942871094, 253.65948486328125], "score": 
0.0381355918943882}, {"image_id": 0, "category_id": 3, "bbox": [677.9243774414062, 550.4613647460938, 317.20465087890625, 273.3955078125], "score": 0.03805278614163399}, {"image_id": 0, "category_id": 3, "bbox": [193.78884887695312, 238.8985137939453, 323.0917053222656, 253.2459259033203], "score": 0.037540555000305176}, {"image_id": 0, "category_id": 3, "bbox": [719.31591796875, 743.0276489257812, 327.4959716796875, 270.27532958984375], "score": 0.0373714305460453}, {"image_id": 0, "category_id": 3, "bbox": [46.373291015625, 132.6981201171875, 889.1526489257812, 926.0496826171875], "score": 0.03724218159914017}, {"image_id": 0, "category_id": 3, "bbox": [696.2403564453125, 723.4462890625, 448.326171875, 356.5537109375], "score": 0.03681308776140213}, {"image_id": 1, "category_id": 1, "bbox": [202.80299377441406, 48.89341735839844, 388.1920623779297, 577.3059844970703], "score": 0.2071688026189804}, {"image_id": 1, "category_id": 1, "bbox": [980.0903930664062, 175.3304901123047, 370.66510009765625, 532.2101593017578], "score": 0.12488597631454468}, {"image_id": 1, "category_id": 1, "bbox": [16.577224731445312, 93.52245330810547, 923.9291839599609, 940.0153884887695], "score": 0.07819413393735886}, {"image_id": 1, "category_id": 1, "bbox": [960.4220581054688, 322.23297119140625, 375.43121337890625, 590.427490234375], "score": 0.0654076635837555}, {"image_id": 1, "category_id": 1, "bbox": [1208.203369140625, 306.3695373535156, 416.3350830078125, 636.2903747558594], "score": 0.04966358467936516}, {"image_id": 1, "category_id": 1, "bbox": [0.0, 353.5009765625, 764.7324829101562, 726.4990234375], "score": 0.04921106994152069}, {"image_id": 1, "category_id": 1, "bbox": [250.98263549804688, 209.64566040039062, 350.4082946777344, 600.4137878417969], "score": 0.04739614203572273}, {"image_id": 1, "category_id": 1, "bbox": [1678.3331298828125, 0.0, 241.6668701171875, 215.86097717285156], "score": 0.04526751860976219}, {"image_id": 1, "category_id": 1, "bbox": [68.1489486694336, 19.873186111450195, 468.47843170166016, 549.8787670135498], "score": 0.04228946939110756}, {"image_id": 1, "category_id": 1, "bbox": [0.0, 31.226011276245117, 639.4743041992188, 934.0142841339111], "score": 0.04006609693169594}, {"image_id": 1, "category_id": 1, "bbox": [302.60888671875, 137.9506072998047, 1127.1011962890625, 900.5988311767578], "score": 0.03885152190923691}, {"image_id": 1, "category_id": 1, "bbox": [95.89942932128906, 242.96229553222656, 1091.658187866211, 837.0377044677734], "score": 0.03825405612587929}, {"image_id": 1, "category_id": 1, "bbox": [1080.826904296875, 176.1944580078125, 789.3431396484375, 845.7803955078125], "score": 0.032923080027103424}, {"image_id": 1, "category_id": 2, "bbox": [979.9208374023438, 130.60316467285156, 368.63226318359375, 538.6393890380859], "score": 0.4964984357357025}, {"image_id": 1, "category_id": 2, "bbox": [1057.33740234375, 133.38429260253906, 473.70849609375, 531.9385223388672], "score": 0.26362964510917664}, {"image_id": 1, "category_id": 2, "bbox": [967.6528930664062, 0.0, 375.30633544921875, 539.7909545898438], "score": 0.16914863884449005}, {"image_id": 1, "category_id": 2, "bbox": [861.7137451171875, 100.46588134765625, 434.4931640625, 601.0418701171875], "score": 0.1592649668455124}, {"image_id": 1, "category_id": 2, "bbox": [974.2996826171875, 230.30929565429688, 372.7330322265625, 653.2842102050781], "score": 0.12819692492485046}, {"image_id": 1, "category_id": 2, "bbox": [717.1983032226562, 60.745906829833984, 475.63507080078125, 579.6842079162598], 
"score": 0.07131838798522949}, {"image_id": 1, "category_id": 2, "bbox": [849.0694580078125, 0.0, 419.6287841796875, 522.9567260742188], "score": 0.06975005567073822}, {"image_id": 1, "category_id": 2, "bbox": [1080.9525146484375, 0.0, 427.8929443359375, 539.1176147460938], "score": 0.0679352730512619}, {"image_id": 1, "category_id": 2, "bbox": [1177.736572265625, 65.42182922363281, 446.274169921875, 585.9313812255859], "score": 0.06460730731487274}, {"image_id": 1, "category_id": 2, "bbox": [1143.9202880859375, 360.51812744140625, 415.880615234375, 614.0466918945312], "score": 0.0641966462135315}, {"image_id": 1, "category_id": 2, "bbox": [1205.340576171875, 263.2333984375, 423.830810546875, 623.4467163085938], "score": 0.059842243790626526}, {"image_id": 1, "category_id": 2, "bbox": [843.7665405273438, 271.6759948730469, 439.92218017578125, 572.4132995605469], "score": 0.05933408439159393}, {"image_id": 1, "category_id": 2, "bbox": [1101.8629150390625, 272.1551513671875, 413.378173828125, 587.8577270507812], "score": 0.05774245783686638}, {"image_id": 1, "category_id": 2, "bbox": [1197.2669677734375, 452.9737548828125, 404.32666015625, 623.77880859375], "score": 0.05150828883051872}, {"image_id": 1, "category_id": 2, "bbox": [1003.201171875, 375.7236633300781, 387.3780517578125, 585.8294372558594], "score": 0.04959609732031822}, {"image_id": 1, "category_id": 2, "bbox": [914.7984619140625, 430.3951110839844, 405.4527587890625, 587.9645690917969], "score": 0.048338308930397034}, {"image_id": 1, "category_id": 2, "bbox": [790.8104248046875, 684.5170288085938, 517.81884765625, 395.48297119140625], "score": 0.04748774692416191}, {"image_id": 1, "category_id": 2, "bbox": [697.1532592773438, 584.62451171875, 495.43280029296875, 495.37548828125], "score": 0.04718704894185066}, {"image_id": 1, "category_id": 2, "bbox": [1673.7528076171875, 0.0, 246.2471923828125, 190.01805114746094], "score": 0.047092270106077194}, {"image_id": 1, "category_id": 2, "bbox": [601.1651611328125, 678.6521606445312, 504.868896484375, 401.34783935546875], "score": 0.046771809458732605}, {"image_id": 1, "category_id": 2, "bbox": [970.5363159179688, 201.20680236816406, 313.78826904296875, 269.75868225097656], "score": 0.04578879475593567}, {"image_id": 1, "category_id": 2, "bbox": [192.10781860351562, 0.0, 408.2882385253906, 535.5543212890625], "score": 0.044811032712459564}, {"image_id": 1, "category_id": 2, "bbox": [871.6666870117188, 152.0116729736328, 318.02777099609375, 259.7932586669922], "score": 0.043680522590875626}, {"image_id": 1, "category_id": 2, "bbox": [972.110107421875, 119.11931610107422, 311.2896728515625, 273.5067825317383], "score": 0.04245423898100853}, {"image_id": 1, "category_id": 2, "bbox": [509.4417419433594, 589.6875610351562, 479.8036804199219, 490.31243896484375], "score": 0.04211155325174332}, {"image_id": 1, "category_id": 2, "bbox": [979.5938110351562, 600.1032104492188, 469.90826416015625, 479.89678955078125], "score": 0.041221000254154205}, {"image_id": 1, "category_id": 2, "bbox": [1357.7982177734375, 359.09442138671875, 458.8018798828125, 614.8314208984375], "score": 0.04084136337041855}, {"image_id": 1, "category_id": 2, "bbox": [200.1617889404297, 141.02706909179688, 371.5296173095703, 623.3135681152344], "score": 0.03988874703645706}, {"image_id": 1, "category_id": 2, "bbox": [800.1287841796875, 439.4930114746094, 935.9476318359375, 640.5069885253906], "score": 0.03921045362949371}, {"image_id": 1, "category_id": 2, "bbox": [310.3495788574219, 0.0, 423.7902526855469, 
554.1644287109375], "score": 0.039209239184856415}, {"image_id": 1, "category_id": 2, "bbox": [1273.8453369140625, 131.06039428710938, 421.41943359375, 563.8321838378906], "score": 0.038920823484659195}, {"image_id": 1, "category_id": 2, "bbox": [666.93896484375, 768.7945556640625, 548.209716796875, 311.2054443359375], "score": 0.038239486515522}, {"image_id": 1, "category_id": 2, "bbox": [707.978759765625, 223.35340881347656, 487.03857421875, 564.0293426513672], "score": 0.037817973643541336}, {"image_id": 1, "category_id": 2, "bbox": [1061.8099365234375, 199.5303497314453, 317.015380859375, 272.3744354248047], "score": 0.037720926105976105}, {"image_id": 1, "category_id": 2, "bbox": [705.898193359375, 376.2648620605469, 483.199462890625, 591.3776550292969], "score": 0.037384580820798874}, {"image_id": 1, "category_id": 2, "bbox": [1316.1328125, 24.848215103149414, 390.4564208984375, 555.0884914398193], "score": 0.03677811473608017}, {"image_id": 1, "category_id": 2, "bbox": [1581.0010986328125, 335.2193908691406, 338.9989013671875, 660.8935852050781], "score": 0.036777034401893616}, {"image_id": 1, "category_id": 2, "bbox": [869.4412231445312, 765.56201171875, 532.8871459960938, 314.43798828125], "score": 0.036623165011405945}, {"image_id": 1, "category_id": 2, "bbox": [129.82164001464844, 277.7202453613281, 440.13063049316406, 583.1706237792969], "score": 0.0364743210375309}, {"image_id": 1, "category_id": 2, "bbox": [607.940185546875, 71.23744201660156, 457.1441650390625, 564.7398529052734], "score": 0.03579607978463173}, {"image_id": 1, "category_id": 2, "bbox": [150.06349182128906, 0.0, 468.1374969482422, 349.6485595703125], "score": 0.035432130098342896}, {"image_id": 1, "category_id": 2, "bbox": [534.4262084960938, 320.1931457519531, 1027.3093872070312, 759.8068542480469], "score": 0.03500848636031151}, {"image_id": 1, "category_id": 2, "bbox": [1297.5567626953125, 515.9488525390625, 387.5936279296875, 564.0511474609375], "score": 0.034989338368177414}, {"image_id": 1, "category_id": 2, "bbox": [1470.009765625, 473.75445556640625, 449.990234375, 599.0226440429688], "score": 0.034738689661026}, {"image_id": 1, "category_id": 2, "bbox": [1016.1448974609375, 390.4505310058594, 317.199462890625, 271.9722595214844], "score": 0.03472616896033287}, {"image_id": 1, "category_id": 2, "bbox": [1074.9833984375, 647.5311279296875, 482.15185546875, 432.4688720703125], "score": 0.03436722233891487}, {"image_id": 1, "category_id": 2, "bbox": [1016.2000732421875, 255.468994140625, 318.2659912109375, 271.09808349609375], "score": 0.03416362777352333}, {"image_id": 1, "category_id": 2, "bbox": [385.4162292480469, 0.0, 470.1957092285156, 458.5057067871094], "score": 0.033299319446086884}, {"image_id": 1, "category_id": 2, "bbox": [302.60888671875, 137.9506072998047, 1127.1011962890625, 900.5988311767578], "score": 0.03329755365848541}, {"image_id": 1, "category_id": 2, "bbox": [73.18597412109375, 74.59601593017578, 433.066162109375, 641.330436706543], "score": 0.032862477004528046}, {"image_id": 1, "category_id": 2, "bbox": [962.9189453125, 450.8254089355469, 325.4345703125, 257.7843933105469], "score": 0.032387521117925644}, {"image_id": 1, "category_id": 2, "bbox": [33.50555419921875, 219.21205139160156, 434.1578063964844, 587.1461029052734], "score": 0.03219788894057274}, {"image_id": 1, "category_id": 2, "bbox": [299.6593933105469, 0.0, 464.5139465332031, 336.22686767578125], "score": 0.032156024128198624}, {"image_id": 1, "category_id": 3, "bbox": [987.860595703125, 131.8839874267578, 
375.306884765625, 535.6018524169922], "score": 0.5385345220565796}, {"image_id": 1, "category_id": 3, "bbox": [1057.2813720703125, 180.95823669433594, 473.6085205078125, 523.3455352783203], "score": 0.14141544699668884}, {"image_id": 1, "category_id": 3, "bbox": [1003.812744140625, 325.2362365722656, 366.272705078125, 581.5386657714844], "score": 0.11855906993150711}, {"image_id": 1, "category_id": 3, "bbox": [221.4788818359375, 53.003326416015625, 365.144775390625, 571.8098449707031], "score": 0.09569650143384933}, {"image_id": 1, "category_id": 3, "bbox": [967.6528930664062, 0.0, 375.30633544921875, 539.7909545898438], "score": 0.08662804961204529}, {"image_id": 1, "category_id": 3, "bbox": [95.89942932128906, 242.96229553222656, 1091.658187866211, 837.0377044677734], "score": 0.05968952178955078}, {"image_id": 1, "category_id": 3, "bbox": [848.2890625, 6.817076683044434, 441.870849609375, 595.4335947036743], "score": 0.059539567679166794}, {"image_id": 1, "category_id": 3, "bbox": [1208.203369140625, 306.3695373535156, 416.3350830078125, 636.2903747558594], "score": 0.057526879012584686}, {"image_id": 1, "category_id": 3, "bbox": [1018.930908203125, 196.21322631835938, 312.6258544921875, 276.56610107421875], "score": 0.05673537775874138}, {"image_id": 1, "category_id": 3, "bbox": [972.110107421875, 119.11931610107422, 311.2896728515625, 273.5067825317383], "score": 0.053518231958150864}, {"image_id": 1, "category_id": 3, "bbox": [1016.1448974609375, 390.4505310058594, 317.199462890625, 271.9722595214844], "score": 0.052769362926483154}, {"image_id": 1, "category_id": 3, "bbox": [1678.3331298828125, 0.0, 241.6668701171875, 215.86097717285156], "score": 0.05142246186733246}, {"image_id": 1, "category_id": 3, "bbox": [16.577224731445312, 93.52245330810547, 923.9291839599609, 940.0153884887695], "score": 0.04904355853796005}, {"image_id": 1, "category_id": 3, "bbox": [871.6666870117188, 152.0116729736328, 318.02777099609375, 259.7932586669922], "score": 0.04746595397591591}, {"image_id": 1, "category_id": 3, "bbox": [918.2660522460938, 204.9866485595703, 319.42950439453125, 263.36473083496094], "score": 0.045840222388505936}, {"image_id": 1, "category_id": 3, "bbox": [1108.9351806640625, 204.63426208496094, 319.12158203125, 263.57164001464844], "score": 0.04211726412177086}, {"image_id": 1, "category_id": 3, "bbox": [1017.8338623046875, 312.9434814453125, 322.0272216796875, 265.74224853515625], "score": 0.041849978268146515}, {"image_id": 1, "category_id": 3, "bbox": [969.9340209960938, 256.25274658203125, 321.77850341796875, 270.187255859375], "score": 0.041241779923439026}, {"image_id": 1, "category_id": 3, "bbox": [1063.9158935546875, 256.18804931640625, 318.087158203125, 270.8890380859375], "score": 0.04124132916331291}, {"image_id": 1, "category_id": 3, "bbox": [918.6217041015625, 419.4957275390625, 323.477783203125, 267.4356689453125], "score": 0.0399257056415081}, {"image_id": 1, "category_id": 3, "bbox": [1010.472900390625, 496.4913635253906, 321.6484375, 272.4706726074219], "score": 0.0388648621737957}, {"image_id": 1, "category_id": 3, "bbox": [0.0, 458.5977478027344, 1005.3092651367188, 621.4022521972656], "score": 0.03853829577565193}, {"image_id": 1, "category_id": 3, "bbox": [302.60888671875, 137.9506072998047, 1127.1011962890625, 900.5988311767578], "score": 0.03693285211920738}, {"image_id": 1, "category_id": 3, "bbox": [918.8399047851562, 72.67695617675781, 317.93096923828125, 255.63429260253906], "score": 0.03615153208374977}, {"image_id": 1, "category_id": 3, "bbox": 
[1013.2153930664062, 73.3503189086914, 319.56231689453125, 253.52364349365234], "score": 0.03585299476981163}, {"image_id": 1, "category_id": 3, "bbox": [1059.955322265625, 150.84774780273438, 315.49072265625, 260.4895935058594], "score": 0.03570360317826271}, {"image_id": 1, "category_id": 3, "bbox": [339.7130126953125, 183.0366668701172, 322.95556640625, 250.6807098388672], "score": 0.034840602427721024}, {"image_id": 1, "category_id": 3, "bbox": [1105.7183837890625, 475.2919921875, 324.7850341796875, 260.35467529296875], "score": 0.03438861295580864}, {"image_id": 1, "category_id": 3, "bbox": [384.33856201171875, 127.01840209960938, 324.55938720703125, 258.36773681640625], "score": 0.034326717257499695}, {"image_id": 1, "category_id": 3, "bbox": [250.98263549804688, 209.64566040039062, 350.4082946777344, 600.4137878417969], "score": 0.03429573029279709}, {"image_id": 1, "category_id": 3, "bbox": [1106.0472412109375, 320.80328369140625, 326.0528564453125, 253.4969482421875], "score": 0.03389299660921097}, {"image_id": 1, "category_id": 3, "bbox": [241.6955108642578, 183.80625915527344, 324.1178436279297, 251.6279754638672], "score": 0.032446227967739105}, {"image_id": 1, "category_id": 3, "bbox": [920.8851928710938, 341.8414611816406, 327.80914306640625, 261.7467346191406], "score": 0.032440971583127975}, {"image_id": 1, "category_id": 3, "bbox": [1738.33740234375, 12.845596313476562, 181.66259765625, 275.32130432128906], "score": 0.03244061395525932}, {"image_id": 2, "category_id": 1, "bbox": [716.1183471679688, 33.01786422729492, 198.82647705078125, 398.1567573547363], "score": 0.6469985246658325}, {"image_id": 2, "category_id": 1, "bbox": [334.2765808105469, 257.670166015625, 214.08755493164062, 387.21697998046875], "score": 0.5414273142814636}, {"image_id": 2, "category_id": 1, "bbox": [781.5842895507812, 412.160888671875, 205.26312255859375, 307.839111328125], "score": 0.5347500443458557}, {"image_id": 2, "category_id": 1, "bbox": [333.2310791015625, 374.855712890625, 218.4599609375, 345.144287109375], "score": 0.27309364080429077}, {"image_id": 2, "category_id": 1, "bbox": [591.6755981445312, 487.0100402832031, 206.61962890625, 232.98995971679688], "score": 0.2272769659757614}, {"image_id": 2, "category_id": 1, "bbox": [776.0943603515625, 251.705810546875, 225.33038330078125, 403.85284423828125], "score": 0.17131343483924866}, {"image_id": 2, "category_id": 1, "bbox": [740.3533935546875, 0.0, 243.1796875, 331.553955078125], "score": 0.1398344337940216}, {"image_id": 2, "category_id": 1, "bbox": [815.1553344726562, 0.0, 240.78240966796875, 349.6511535644531], "score": 0.12286520004272461}, {"image_id": 2, "category_id": 1, "bbox": [927.5269775390625, 191.724609375, 241.8394775390625, 441.5283203125], "score": 0.11989450454711914}, {"image_id": 2, "category_id": 1, "bbox": [743.2118530273438, 126.99493408203125, 238.4097900390625, 414.6474609375], "score": 0.11667796224355698}, {"image_id": 2, "category_id": 1, "bbox": [993.7218017578125, 0.0, 271.85302734375, 410.12396240234375], "score": 0.11388330906629562}, {"image_id": 2, "category_id": 1, "bbox": [802.46044921875, 61.79085159301758, 258.9998779296875, 404.6179618835449], "score": 0.11157487332820892}, {"image_id": 2, "category_id": 1, "bbox": [654.2705688476562, 0.0, 235.415283203125, 390.6324157714844], "score": 0.08890987932682037}, {"image_id": 2, "category_id": 1, "bbox": [679.5962524414062, 20.31258773803711, 577.1134643554688, 616.0102882385254], "score": 0.07925908267498016}, {"image_id": 2, "category_id": 1, "bbox": 
[855.5435791015625, 30.790695190429688, 276.889892578125, 411.5422821044922], "score": 0.07151168584823608}, {"image_id": 2, "category_id": 1, "bbox": [860.6309814453125, 142.93971252441406, 284.4814453125, 400.24363708496094], "score": 0.07121723145246506}, {"image_id": 2, "category_id": 1, "bbox": [609.1256103515625, 405.68170166015625, 243.75323486328125, 314.31829833984375], "score": 0.06747602671384811}, {"image_id": 2, "category_id": 1, "bbox": [198.13824462890625, 12.89794921875, 720.572998046875, 626.23388671875], "score": 0.06713633984327316}, {"image_id": 2, "category_id": 1, "bbox": [0.0, 8.666084289550781, 651.87158203125, 627.505729675293], "score": 0.0629500076174736}, {"image_id": 2, "category_id": 1, "bbox": [800.5415649414062, 169.28404235839844, 252.39434814453125, 417.84986877441406], "score": 0.06212380528450012}, {"image_id": 2, "category_id": 1, "bbox": [0.0, 272.78045654296875, 271.406494140625, 422.60546875], "score": 0.06189417839050293}, {"image_id": 2, "category_id": 1, "bbox": [648.2142333984375, 122.94953918457031, 225.2669677734375, 415.3713836669922], "score": 0.0558079369366169}, {"image_id": 2, "category_id": 1, "bbox": [687.2279052734375, 328.3665466308594, 292.4498291015625, 388.7452087402344], "score": 0.055604010820388794}, {"image_id": 2, "category_id": 1, "bbox": [15.0018310546875, 338.0758056640625, 619.5201416015625, 381.9241943359375], "score": 0.0511258989572525}, {"image_id": 2, "category_id": 1, "bbox": [469.66455078125, 402.2901611328125, 306.81341552734375, 317.7098388671875], "score": 0.04927913099527359}, {"image_id": 2, "category_id": 1, "bbox": [115.44842529296875, 97.15031433105469, 657.6534423828125, 592.4818878173828], "score": 0.04793230816721916}, {"image_id": 2, "category_id": 1, "bbox": [862.439453125, 335.13800048828125, 262.9146728515625, 378.19696044921875], "score": 0.04608742520213127}, {"image_id": 2, "category_id": 2, "bbox": [468.7154541015625, 340.1265563964844, 282.32476806640625, 353.2936096191406], "score": 0.3050895035266876}, {"image_id": 2, "category_id": 2, "bbox": [529.6630249023438, 378.9244384765625, 268.39532470703125, 341.0755615234375], "score": 0.24002502858638763}, {"image_id": 2, "category_id": 2, "bbox": [404.90289306640625, 404.6185607910156, 314.11724853515625, 315.3814392089844], "score": 0.2089109569787979}, {"image_id": 2, "category_id": 2, "bbox": [517.1099243164062, 260.34307861328125, 288.77301025390625, 373.46826171875], "score": 0.19250831007957458}, {"image_id": 2, "category_id": 2, "bbox": [335.9486389160156, 346.0096740722656, 204.64175415039062, 349.7707824707031], "score": 0.17123642563819885}, {"image_id": 2, "category_id": 2, "bbox": [584.3392944335938, 339.32598876953125, 290.60107421875, 353.3560791015625], "score": 0.16634908318519592}, {"image_id": 2, "category_id": 2, "bbox": [340.74371337890625, 403.4620361328125, 267.3587646484375, 316.5379638671875], "score": 0.15027303993701935}, {"image_id": 2, "category_id": 2, "bbox": [406.94091796875, 254.25259399414062, 303.1240234375, 392.7398986816406], "score": 0.1136213019490242}, {"image_id": 2, "category_id": 2, "bbox": [465.8544921875, 227.00198364257812, 283.04351806640625, 370.1658020019531], "score": 0.1080758273601532}, {"image_id": 2, "category_id": 2, "bbox": [779.517578125, 288.085693359375, 217.26123046875, 409.761474609375], "score": 0.10056225210428238}, {"image_id": 2, "category_id": 2, "bbox": [687.2279052734375, 328.3665466308594, 292.4498291015625, 388.7452087402344], "score": 0.09440126270055771}, {"image_id": 2, 
"category_id": 2, "bbox": [243.0077362060547, 392.9939270019531, 298.3233184814453, 327.0060729980469], "score": 0.08671364188194275}, {"image_id": 2, "category_id": 2, "bbox": [340.9293212890625, 257.3981018066406, 249.9754638671875, 390.0070495605469], "score": 0.07886775583028793}, {"image_id": 2, "category_id": 2, "bbox": [603.698486328125, 217.70782470703125, 277.39947509765625, 389.84259033203125], "score": 0.0772911086678505}, {"image_id": 2, "category_id": 2, "bbox": [705.9512939453125, 166.00985717773438, 232.5009765625, 415.5522155761719], "score": 0.07004109770059586}, {"image_id": 2, "category_id": 2, "bbox": [716.1183471679688, 33.01786422729492, 198.82647705078125, 398.1567573547363], "score": 0.06791006773710251}, {"image_id": 2, "category_id": 2, "bbox": [591.6755981445312, 487.0100402832031, 206.61962890625, 232.98995971679688], "score": 0.06605392694473267}, {"image_id": 2, "category_id": 2, "bbox": [782.4558715820312, 435.6845397949219, 203.274169921875, 284.3154602050781], "score": 0.06494831293821335}, {"image_id": 2, "category_id": 2, "bbox": [306.40130615234375, 150.4209747314453, 255.44818115234375, 397.0564422607422], "score": 0.05865931510925293}, {"image_id": 2, "category_id": 2, "bbox": [180.760986328125, 91.18614196777344, 253.5904541015625, 357.8690643310547], "score": 0.05769152566790581}, {"image_id": 2, "category_id": 2, "bbox": [802.2528076171875, 208.9149169921875, 240.9486083984375, 411.080322265625], "score": 0.0558679960668087}, {"image_id": 2, "category_id": 2, "bbox": [235.97689819335938, 122.7442855834961, 254.52435302734375, 364.9488296508789], "score": 0.05381862819194794}, {"image_id": 2, "category_id": 2, "bbox": [659.7426147460938, 480.04425048828125, 293.0218505859375, 239.95574951171875], "score": 0.05358342453837395}, {"image_id": 2, "category_id": 2, "bbox": [654.2705688476562, 0.0, 235.415283203125, 390.6324157714844], "score": 0.053301796317100525}, {"image_id": 2, "category_id": 2, "bbox": [648.2142333984375, 122.94953918457031, 225.2669677734375, 415.3713836669922], "score": 0.053011003881692886}, {"image_id": 2, "category_id": 2, "bbox": [147.39500427246094, 371.3238525390625, 329.84938049316406, 348.6761474609375], "score": 0.052651431411504745}, {"image_id": 2, "category_id": 2, "bbox": [862.439453125, 335.13800048828125, 262.9146728515625, 378.19696044921875], "score": 0.051309600472450256}, {"image_id": 2, "category_id": 2, "bbox": [160.64169311523438, 155.09120178222656, 288.6609191894531, 376.97874450683594], "score": 0.05082259699702263}, {"image_id": 2, "category_id": 2, "bbox": [98.3924560546875, 122.0090103149414, 286.3270568847656, 366.41222381591797], "score": 0.05081966146826744}, {"image_id": 2, "category_id": 2, "bbox": [927.3992309570312, 232.60025024414062, 242.94927978515625, 423.7073059082031], "score": 0.05071308836340904}, {"image_id": 2, "category_id": 2, "bbox": [815.1553344726562, 0.0, 240.78240966796875, 349.6511535644531], "score": 0.05022132024168968}, {"image_id": 2, "category_id": 2, "bbox": [735.0808715820312, 76.85767364501953, 242.8267822265625, 434.5238571166992], "score": 0.04981868341565132}, {"image_id": 2, "category_id": 2, "bbox": [864.6588134765625, 162.20660400390625, 280.309814453125, 434.82232666015625], "score": 0.04920060560107231}, {"image_id": 2, "category_id": 2, "bbox": [18.01129150390625, 92.94086456298828, 614.58251953125, 606.0074996948242], "score": 0.04908081516623497}, {"image_id": 2, "category_id": 2, "bbox": [559.6688842773438, 140.61448669433594, 264.4442138671875, 
393.2423858642578], "score": 0.04870881140232086}, {"image_id": 2, "category_id": 2, "bbox": [559.020751953125, 14.123096466064453, 297.3948974609375, 430.3543510437012], "score": 0.04608292877674103}, {"image_id": 2, "category_id": 3, "bbox": [465.35394287109375, 295.9808654785156, 285.955810546875, 368.4263000488281], "score": 0.44076138734817505}, {"image_id": 2, "category_id": 3, "bbox": [335.9486389160156, 346.0096740722656, 204.64175415039062, 349.7707824707031], "score": 0.3247727155685425}, {"image_id": 2, "category_id": 3, "bbox": [523.6324462890625, 343.859130859375, 272.05389404296875, 346.34320068359375], "score": 0.30665889382362366}, {"image_id": 2, "category_id": 3, "bbox": [415.7672119140625, 377.5996398925781, 306.9423828125, 342.4003601074219], "score": 0.26151832938194275}, {"image_id": 2, "category_id": 3, "bbox": [778.4273071289062, 327.9366760253906, 215.45880126953125, 392.0633239746094], "score": 0.21306046843528748}, {"image_id": 2, "category_id": 3, "bbox": [340.74371337890625, 403.4620361328125, 267.3587646484375, 316.5379638671875], "score": 0.16517065465450287}, {"image_id": 2, "category_id": 3, "bbox": [584.49560546875, 442.4208679199219, 216.6456298828125, 277.5791320800781], "score": 0.16464999318122864}, {"image_id": 2, "category_id": 3, "bbox": [716.1183471679688, 33.01786422729492, 198.82647705078125, 398.1567573547363], "score": 0.15953536331653595}, {"image_id": 2, "category_id": 3, "bbox": [584.3392944335938, 339.32598876953125, 290.60107421875, 353.3560791015625], "score": 0.15136520564556122}, {"image_id": 2, "category_id": 3, "bbox": [340.9293212890625, 257.3981018066406, 249.9754638671875, 390.0070495605469], "score": 0.1309502273797989}, {"image_id": 2, "category_id": 3, "bbox": [927.3992309570312, 232.60025024414062, 242.94927978515625, 423.7073059082031], "score": 0.10498177260160446}, {"image_id": 2, "category_id": 3, "bbox": [801.0552978515625, 254.1939239501953, 238.55517578125, 396.5814666748047], "score": 0.09740360081195831}, {"image_id": 2, "category_id": 3, "bbox": [743.2118530273438, 126.99493408203125, 238.4097900390625, 414.6474609375], "score": 0.09367235004901886}, {"image_id": 2, "category_id": 3, "bbox": [378.35528564453125, 169.94345092773438, 659.3099975585938, 550.0565490722656], "score": 0.08215869963169098}, {"image_id": 2, "category_id": 3, "bbox": [740.3533935546875, 0.0, 243.1796875, 331.553955078125], "score": 0.08097793906927109}, {"image_id": 2, "category_id": 3, "bbox": [120.68417358398438, 180.28855895996094, 641.7084045410156, 539.7114410400391], "score": 0.07723692804574966}, {"image_id": 2, "category_id": 3, "bbox": [796.0755004882812, 438.0318298339844, 243.19879150390625, 281.9681701660156], "score": 0.0706152468919754}, {"image_id": 2, "category_id": 3, "bbox": [269.719482421875, 338.7680358886719, 613.2383422851562, 381.2319641113281], "score": 0.0693223848938942}, {"image_id": 2, "category_id": 3, "bbox": [815.1553344726562, 0.0, 240.78240966796875, 349.6511535644531], "score": 0.06580016762018204}, {"image_id": 2, "category_id": 3, "bbox": [798.2135009765625, 104.08138275146484, 262.519287109375, 390.5770034790039], "score": 0.06532440334558487}, {"image_id": 2, "category_id": 3, "bbox": [673.9559936523438, 290.5751647949219, 299.43682861328125, 394.5818786621094], "score": 0.06421011686325073}, {"image_id": 2, "category_id": 3, "bbox": [6.56268310546875, 259.1528625488281, 626.3397827148438, 460.8471374511719], "score": 0.06076177954673767}, {"image_id": 2, "category_id": 3, "bbox": [419.4716796875, 
457.4134216308594, 210.6640625, 183.69711303710938], "score": 0.057710759341716766}, {"image_id": 2, "category_id": 3, "bbox": [198.13824462890625, 12.89794921875, 720.572998046875, 626.23388671875], "score": 0.05747276544570923}, {"image_id": 2, "category_id": 3, "bbox": [452.56610107421875, 421.88165283203125, 210.6337890625, 180.91033935546875], "score": 0.05695047974586487}, {"image_id": 2, "category_id": 3, "bbox": [463.74224853515625, 338.0999755859375, 706.7877807617188, 381.9000244140625], "score": 0.055746372789144516}, {"image_id": 2, "category_id": 3, "bbox": [530.2576904296875, 225.9068145751953, 279.74188232421875, 372.7286834716797], "score": 0.055705711245536804}, {"image_id": 2, "category_id": 3, "bbox": [550.1045532226562, 423.62158203125, 209.1629638671875, 175.8369140625], "score": 0.05497250333428383}, {"image_id": 2, "category_id": 3, "bbox": [679.5962524414062, 20.31258773803711, 577.1134643554688, 616.0102882385254], "score": 0.05453750491142273}, {"image_id": 2, "category_id": 3, "bbox": [480.35247802734375, 457.35650634765625, 217.55206298828125, 182.35479736328125], "score": 0.05086026340723038}, {"image_id": 2, "category_id": 3, "bbox": [860.6309814453125, 142.93971252441406, 284.4814453125, 400.24363708496094], "score": 0.050134215503931046}, {"image_id": 2, "category_id": 3, "bbox": [648.2142333984375, 122.94953918457031, 225.2669677734375, 415.3713836669922], "score": 0.04803122952580452}, {"image_id": 2, "category_id": 3, "bbox": [855.5435791015625, 30.790695190429688, 276.889892578125, 411.5422821044922], "score": 0.047545745968818665}, {"image_id": 2, "category_id": 3, "bbox": [356.91943359375, 383.6956481933594, 212.92022705078125, 182.99612426757812], "score": 0.0465291365981102}, {"image_id": 2, "category_id": 3, "bbox": [551.9226684570312, 372.4231262207031, 206.78228759765625, 172.13223266601562], "score": 0.04638904705643654}, {"image_id": 2, "category_id": 3, "bbox": [483.9233093261719, 389.48095703125, 213.26370239257812, 173.57696533203125], "score": 0.04624783992767334}, {"image_id": 2, "category_id": 3, "bbox": [876.198974609375, 299.3984375, 243.1624755859375, 376.6092529296875], "score": 0.04604649916291237}, {"image_id": 3, "category_id": 1, "bbox": [1035.6612548828125, 214.9031524658203, 336.342529296875, 666.3300628662109], "score": 0.795666515827179}, {"image_id": 3, "category_id": 1, "bbox": [1069.2259521484375, 183.0326690673828, 438.4307861328125, 662.6085662841797], "score": 0.3779263496398926}, {"image_id": 3, "category_id": 1, "bbox": [200.34695434570312, 46.225128173828125, 400.8544006347656, 586.0025939941406], "score": 0.20462119579315186}, {"image_id": 3, "category_id": 1, "bbox": [985.2901611328125, 124.33424377441406, 354.4957275390625, 626.8624725341797], "score": 0.1739465594291687}, {"image_id": 3, "category_id": 1, "bbox": [742.7831420898438, 451.75360107421875, 393.57781982421875, 521.4494018554688], "score": 0.135039284825325}, {"image_id": 3, "category_id": 1, "bbox": [8.739669799804688, 26.403854370117188, 891.2755279541016, 951.6772003173828], "score": 0.09225242584943771}, {"image_id": 3, "category_id": 1, "bbox": [837.7506103515625, 183.6251678466797, 464.0509033203125, 654.3900909423828], "score": 0.07524938881397247}, {"image_id": 3, "category_id": 1, "bbox": [741.9559936523438, 265.7646179199219, 389.01910400390625, 611.9831848144531], "score": 0.057572342455387115}, {"image_id": 3, "category_id": 1, "bbox": [919.6919555664062, 364.5243225097656, 388.20098876953125, 597.3288269042969], "score": 
0.048295531421899796}, {"image_id": 3, "category_id": 1, "bbox": [131.76718139648438, 121.92403411865234, 1037.1733703613281, 907.2949600219727], "score": 0.04827354475855827}, {"image_id": 3, "category_id": 1, "bbox": [59.87779998779297, 18.122446060180664, 482.55078887939453, 559.7399806976318], "score": 0.04303886368870735}, {"image_id": 3, "category_id": 1, "bbox": [0.0, 363.56982421875, 773.1881103515625, 716.43017578125], "score": 0.04267927631735802}, {"image_id": 3, "category_id": 1, "bbox": [248.6608428955078, 200.8115997314453, 353.2211151123047, 610.8721160888672], "score": 0.040682680904865265}, {"image_id": 3, "category_id": 1, "bbox": [750.4139404296875, 43.66164016723633, 1007.6400146484375, 893.0037651062012], "score": 0.04039163514971733}, {"image_id": 3, "category_id": 1, "bbox": [0.0, 480.26776123046875, 356.0354309082031, 599.7322387695312], "score": 0.040319837629795074}, {"image_id": 3, "category_id": 1, "bbox": [623.5997924804688, 157.352783203125, 925.9807739257812, 859.0802612304688], "score": 0.03898235037922859}, {"image_id": 3, "category_id": 1, "bbox": [41.2833251953125, 0.0, 856.59619140625, 585.2740478515625], "score": 0.038785286247730255}, {"image_id": 3, "category_id": 2, "bbox": [742.7831420898438, 451.75360107421875, 393.57781982421875, 521.4494018554688], "score": 0.33278757333755493}, {"image_id": 3, "category_id": 2, "bbox": [741.9559936523438, 265.7646179199219, 389.01910400390625, 611.9831848144531], "score": 0.19564616680145264}, {"image_id": 3, "category_id": 2, "bbox": [883.453125, 442.5852355957031, 436.8837890625, 545.0244445800781], "score": 0.15785518288612366}, {"image_id": 3, "category_id": 2, "bbox": [844.4725341796875, 0.0, 422.69482421875, 497.2137756347656], "score": 0.11376787722110748}, {"image_id": 3, "category_id": 2, "bbox": [655.5657348632812, 226.45999145507812, 406.67559814453125, 583.5442199707031], "score": 0.10200759023427963}, {"image_id": 3, "category_id": 2, "bbox": [789.0429077148438, 559.0130615234375, 440.40155029296875, 520.9869384765625], "score": 0.0967077761888504}, {"image_id": 3, "category_id": 2, "bbox": [736.6517944335938, 21.974321365356445, 397.96795654296875, 544.3265209197998], "score": 0.09434083849191666}, {"image_id": 3, "category_id": 2, "bbox": [925.2030639648438, 0.0, 436.18585205078125, 431.1753234863281], "score": 0.0921623483300209}, {"image_id": 3, "category_id": 2, "bbox": [978.596923828125, 183.8644256591797, 376.5323486328125, 664.3173980712891], "score": 0.09140130877494812}, {"image_id": 3, "category_id": 2, "bbox": [538.2086181640625, 437.48333740234375, 461.23968505859375, 554.9873046875], "score": 0.08996011316776276}, {"image_id": 3, "category_id": 2, "bbox": [673.3460083007812, 508.87548828125, 411.55865478515625, 571.12451171875], "score": 0.0890321135520935}, {"image_id": 3, "category_id": 2, "bbox": [975.360595703125, 481.3533935546875, 454.305419921875, 570.73876953125], "score": 0.0842755138874054}, {"image_id": 3, "category_id": 2, "bbox": [900.4373168945312, 613.9129028320312, 438.14556884765625, 466.08709716796875], "score": 0.08182763308286667}, {"image_id": 3, "category_id": 2, "bbox": [637.0223388671875, 15.15922737121582, 398.96240234375, 557.1377696990967], "score": 0.07100741565227509}, {"image_id": 3, "category_id": 2, "bbox": [866.162841796875, 85.76036071777344, 383.2100830078125, 552.0684967041016], "score": 0.06706978380680084}, {"image_id": 3, "category_id": 2, "bbox": [526.8665771484375, 263.37530517578125, 461.50799560546875, 596.9874877929688], "score": 
0.06696736812591553}, {"image_id": 3, "category_id": 2, "bbox": [954.5230102539062, 25.777118682861328, 361.16204833984375, 571.1542167663574], "score": 0.0645500123500824}, {"image_id": 3, "category_id": 2, "bbox": [1031.97509765625, 318.7236328125, 344.675537109375, 659.6461791992188], "score": 0.061543576419353485}, {"image_id": 3, "category_id": 2, "bbox": [696.748779296875, 689.76171875, 481.3115234375, 390.23828125], "score": 0.05915040522813797}, {"image_id": 3, "category_id": 2, "bbox": [986.7041625976562, 692.6589965820312, 458.85186767578125, 387.34100341796875], "score": 0.055616676807403564}, {"image_id": 3, "category_id": 2, "bbox": [1069.2259521484375, 183.0326690673828, 438.4307861328125, 662.6085662841797], "score": 0.05321633070707321}, {"image_id": 3, "category_id": 2, "bbox": [131.76718139648438, 121.92403411865234, 1037.1733703613281, 907.2949600219727], "score": 0.051471877843141556}, {"image_id": 3, "category_id": 2, "bbox": [190.4344940185547, 0.0, 414.5518341064453, 545.5484619140625], "score": 0.049227025359869}, {"image_id": 3, "category_id": 2, "bbox": [1080.5438232421875, 0.0, 406.833251953125, 450.50543212890625], "score": 0.048648107796907425}, {"image_id": 3, "category_id": 2, "bbox": [1079.1932373046875, 468.0555419921875, 461.343017578125, 598.882568359375], "score": 0.04832124710083008}, {"image_id": 3, "category_id": 2, "bbox": [420.72161865234375, 387.0404968261719, 456.364013671875, 569.3083190917969], "score": 0.04584898799657822}, {"image_id": 3, "category_id": 2, "bbox": [870.258056640625, 164.85987854003906, 322.4080810546875, 231.62110900878906], "score": 0.044634800404310226}, {"image_id": 3, "category_id": 2, "bbox": [1194.8359375, 411.9163818359375, 429.47265625, 607.2741088867188], "score": 0.04450088366866112}, {"image_id": 3, "category_id": 2, "bbox": [514.9874267578125, 11.4136962890625, 432.60467529296875, 567.0927124023438], "score": 0.04438110813498497}, {"image_id": 3, "category_id": 2, "bbox": [776.2628173828125, 456.39691162109375, 964.5023193359375, 623.6030883789062], "score": 0.04431026056408882}, {"image_id": 3, "category_id": 2, "bbox": [1060.64892578125, 78.68087005615234, 294.814697265625, 593.7388687133789], "score": 0.043137937784194946}, {"image_id": 3, "category_id": 2, "bbox": [494.9678649902344, 682.2994995117188, 505.4728698730469, 397.70050048828125], "score": 0.04281463474035263}, {"image_id": 3, "category_id": 2, "bbox": [785.0187377929688, 759.992431640625, 504.87249755859375, 320.007568359375], "score": 0.042654745280742645}, {"image_id": 3, "category_id": 2, "bbox": [336.1625061035156, 367.0260925292969, 1035.9311218261719, 712.9739074707031], "score": 0.041560474783182144}, {"image_id": 3, "category_id": 2, "bbox": [968.2842407226562, 161.52330017089844, 318.98699951171875, 238.69908142089844], "score": 0.0406038835644722}, {"image_id": 3, "category_id": 2, "bbox": [695.0131225585938, 0.0, 468.98651123046875, 399.41217041015625], "score": 0.040544040501117706}, {"image_id": 3, "category_id": 2, "bbox": [769.1004638671875, 656.0851440429688, 325.2445068359375, 278.58221435546875], "score": 0.04005439952015877}, {"image_id": 3, "category_id": 2, "bbox": [52.84666442871094, 0.0, 856.5525054931641, 500.70489501953125], "score": 0.039228565990924835}, {"image_id": 3, "category_id": 2, "bbox": [318.3122253417969, 0.0, 422.2832946777344, 566.4256591796875], "score": 0.03913244605064392}, {"image_id": 3, "category_id": 2, "bbox": [588.1344604492188, 746.6381225585938, 506.21075439453125, 333.36187744140625], "score": 
0.038427457213401794}, {"image_id": 3, "category_id": 2, "bbox": [418.6874084472656, 218.7036590576172, 431.4969177246094, 588.8452911376953], "score": 0.03838829696178436}, {"image_id": 3, "category_id": 2, "bbox": [1672.207763671875, 0.0, 247.792236328125, 182.04176330566406], "score": 0.03820976987481117}, {"image_id": 3, "category_id": 2, "bbox": [1293.9796142578125, 466.7366943359375, 381.8045654296875, 597.0625], "score": 0.03819340467453003}, {"image_id": 3, "category_id": 2, "bbox": [1312.2000732421875, 0.0, 389.178955078125, 512.6004028320312], "score": 0.03804561495780945}, {"image_id": 3, "category_id": 2, "bbox": [0.0, 122.388916015625, 747.257568359375, 915.013916015625], "score": 0.03756659850478172}, {"image_id": 3, "category_id": 2, "bbox": [345.0478820800781, 52.268211364746094, 1057.6800231933594, 894.2400894165039], "score": 0.03748759627342224}, {"image_id": 3, "category_id": 3, "bbox": [742.7831420898438, 451.75360107421875, 393.57781982421875, 521.4494018554688], "score": 0.7024580240249634}, {"image_id": 3, "category_id": 3, "bbox": [741.9559936523438, 265.7646179199219, 389.01910400390625, 611.9831848144531], "score": 0.37237122654914856}, {"image_id": 3, "category_id": 3, "bbox": [883.453125, 442.5852355957031, 436.8837890625, 545.0244445800781], "score": 0.1760273426771164}, {"image_id": 3, "category_id": 3, "bbox": [977.3419189453125, 218.68470764160156, 375.0968017578125, 669.4812469482422], "score": 0.16969044506549835}, {"image_id": 3, "category_id": 3, "bbox": [990.8377075195312, 408.36083984375, 412.28411865234375, 605.7662963867188], "score": 0.07654619961977005}, {"image_id": 3, "category_id": 3, "bbox": [218.69102478027344, 50.28588104248047, 374.21620178222656, 580.2226028442383], "score": 0.07464182376861572}, {"image_id": 3, "category_id": 3, "bbox": [655.5657348632812, 226.45999145507812, 406.67559814453125, 583.5442199707031], "score": 0.07416342198848724}, {"image_id": 3, "category_id": 3, "bbox": [1048.3018798828125, 127.14530944824219, 304.35498046875, 617.8072052001953], "score": 0.07337816059589386}, {"image_id": 3, "category_id": 3, "bbox": [623.5997924804688, 157.352783203125, 925.9807739257812, 859.0802612304688], "score": 0.071251280605793}, {"image_id": 3, "category_id": 3, "bbox": [927.18310546875, 0.0, 410.65283203125, 493.6187744140625], "score": 0.06908043473958969}, {"image_id": 3, "category_id": 3, "bbox": [826.3679809570312, 491.2876892089844, 312.45404052734375, 285.4236145019531], "score": 0.06830185651779175}, {"image_id": 3, "category_id": 3, "bbox": [107.06520080566406, 42.26438522338867, 1061.8809661865234, 923.4366035461426], "score": 0.06361979246139526}, {"image_id": 3, "category_id": 3, "bbox": [0.0, 266.4491271972656, 988.76953125, 813.5508728027344], "score": 0.0635080561041832}, {"image_id": 3, "category_id": 3, "bbox": [1070.884765625, 221.3953857421875, 432.144287109375, 658.6356811523438], "score": 0.05751591548323631}, {"image_id": 3, "category_id": 3, "bbox": [776.7738647460938, 546.7811889648438, 313.62420654296875, 282.681640625], "score": 0.05742703750729561}, {"image_id": 3, "category_id": 3, "bbox": [731.05419921875, 495.14013671875, 317.9976806640625, 278.52655029296875], "score": 0.05703343451023102}, {"image_id": 3, "category_id": 3, "bbox": [851.6851196289062, 33.223426818847656, 398.10125732421875, 530.9442367553711], "score": 0.05595961585640907}, {"image_id": 3, "category_id": 3, "bbox": [750.4139404296875, 43.66164016723633, 1007.6400146484375, 893.0037651062012], "score": 0.05315830186009407}, 
{"image_id": 3, "category_id": 3, "bbox": [837.7506103515625, 183.6251678466797, 464.0509033203125, 654.3900909423828], "score": 0.05269654467701912}, {"image_id": 3, "category_id": 3, "bbox": [726.2228393554688, 657.1477661132812, 323.61456298828125, 275.0631103515625], "score": 0.05201522260904312}, {"image_id": 3, "category_id": 3, "bbox": [339.3540344238281, 269.9506530761719, 1053.2279968261719, 810.0493469238281], "score": 0.05159247666597366}, {"image_id": 3, "category_id": 3, "bbox": [918.0313720703125, 524.1131591796875, 317.548095703125, 275.23175048828125], "score": 0.05126826465129852}, {"image_id": 3, "category_id": 3, "bbox": [814.9613037109375, 658.3031616210938, 324.4761962890625, 274.94476318359375], "score": 0.04700478911399841}, {"image_id": 3, "category_id": 3, "bbox": [79.49295043945312, 465.938232421875, 1135.0966491699219, 614.061767578125], "score": 0.04572653770446777}, {"image_id": 3, "category_id": 3, "bbox": [624.7630004882812, 659.6362915039062, 328.97479248046875, 272.54193115234375], "score": 0.045429181307554245}, {"image_id": 3, "category_id": 3, "bbox": [973.4158325195312, 133.00636291503906, 312.53619384765625, 243.88768005371094], "score": 0.043456628918647766}, {"image_id": 3, "category_id": 3, "bbox": [538.2086181640625, 437.48333740234375, 461.23968505859375, 554.9873046875], "score": 0.04294494912028313}, {"image_id": 3, "category_id": 3, "bbox": [873.5597534179688, 136.37371826171875, 319.02740478515625, 238.24618530273438], "score": 0.042737264186143875}, {"image_id": 3, "category_id": 3, "bbox": [679.2223510742188, 577.755615234375, 324.75616455078125, 276.8284912109375], "score": 0.04248875379562378}, {"image_id": 3, "category_id": 3, "bbox": [914.7859497070312, 605.1689453125, 324.48175048828125, 275.6666259765625], "score": 0.042463745921850204}, {"image_id": 3, "category_id": 3, "bbox": [967.7005004882812, 468.32611083984375, 322.27301025390625, 277.87835693359375], "score": 0.042417485266923904}, {"image_id": 3, "category_id": 3, "bbox": [758.3052978515625, 346.10693359375, 981.749267578125, 733.89306640625], "score": 0.04214789718389511}, {"image_id": 3, "category_id": 3, "bbox": [778.629638671875, 447.8275146484375, 318.2640380859375, 267.75042724609375], "score": 0.04211647808551788}, {"image_id": 3, "category_id": 3, "bbox": [673.3460083007812, 508.87548828125, 411.55865478515625, 571.12451171875], "score": 0.04160328581929207}, {"image_id": 3, "category_id": 3, "bbox": [870.3949584960938, 447.668701171875, 318.74066162109375, 266.49053955078125], "score": 0.04017994552850723}, {"image_id": 3, "category_id": 3, "bbox": [1672.207763671875, 0.0, 247.792236328125, 182.04176330566406], "score": 0.03825407847762108}, {"image_id": 3, "category_id": 3, "bbox": [0.0, 43.26118850708008, 734.3677368164062, 934.2781791687012], "score": 0.03823452070355415}, {"image_id": 4, "category_id": 1, "bbox": [385.3401794433594, 331.0753173828125, 242.55795288085938, 379.322998046875], "score": 0.20565354824066162}, {"image_id": 4, "category_id": 1, "bbox": [171.52679443359375, 5.440498352050781, 274.6717529296875, 387.7549057006836], "score": 0.12742987275123596}, {"image_id": 4, "category_id": 1, "bbox": [586.066162109375, 14.718589782714844, 556.770263671875, 633.8289566040039], "score": 0.10413916409015656}, {"image_id": 4, "category_id": 1, "bbox": [384.02935791015625, 413.5424499511719, 343.22479248046875, 306.4575500488281], "score": 0.07793685793876648}, {"image_id": 4, "category_id": 1, "bbox": [923.0130004882812, 166.5990753173828, 
242.66741943359375, 432.8295135498047], "score": 0.07398378103971481}, {"image_id": 4, "category_id": 1, "bbox": [28.508636474609375, 82.23001861572266, 602.0848693847656, 620.9732284545898], "score": 0.0642198994755745}, {"image_id": 4, "category_id": 1, "bbox": [411.98577880859375, 256.82952880859375, 252.073974609375, 395.34869384765625], "score": 0.060756392776966095}, {"image_id": 4, "category_id": 1, "bbox": [1060.9808349609375, 0.0, 219.0191650390625, 406.39959716796875], "score": 0.055908430367708206}, {"image_id": 4, "category_id": 1, "bbox": [459.2627868652344, 289.3208312988281, 289.4360656738281, 394.8551940917969], "score": 0.055060870945453644}, {"image_id": 4, "category_id": 1, "bbox": [692.9390869140625, 111.30867004394531, 527.76806640625, 579.2015228271484], "score": 0.05182042345404625}, {"image_id": 4, "category_id": 1, "bbox": [0.0, 240.39627075195312, 278.9359436035156, 433.2909851074219], "score": 0.04528999701142311}, {"image_id": 4, "category_id": 1, "bbox": [517.4312133789062, 416.31109619140625, 274.44122314453125, 303.68890380859375], "score": 0.042836230248212814}, {"image_id": 4, "category_id": 1, "bbox": [98.95840454101562, 251.74676513671875, 689.3782653808594, 468.25323486328125], "score": 0.039635952562093735}, {"image_id": 4, "category_id": 1, "bbox": [0.0, 245.2276153564453, 492.25872802734375, 474.7723846435547], "score": 0.03913917392492294}, {"image_id": 4, "category_id": 1, "bbox": [0.0, 88.33069610595703, 381.7953186035156, 609.8956832885742], "score": 0.03875281661748886}, {"image_id": 4, "category_id": 1, "bbox": [874.0477294921875, 222.90110778808594, 264.3438720703125, 439.4870147705078], "score": 0.03814984858036041}, {"image_id": 4, "category_id": 1, "bbox": [276.2272033691406, 324.1025695800781, 597.7773742675781, 395.8974304199219], "score": 0.03809167444705963}, {"image_id": 4, "category_id": 1, "bbox": [242.16680908203125, 87.65390014648438, 684.6420288085938, 603.4159240722656], "score": 0.035137489438056946}, {"image_id": 4, "category_id": 1, "bbox": [991.0222778320312, 172.85389709472656, 265.28741455078125, 419.1145477294922], "score": 0.03450204059481621}, {"image_id": 4, "category_id": 1, "bbox": [210.33287048339844, 65.17728424072266, 273.50660705566406, 392.2747116088867], "score": 0.034375060349702835}, {"image_id": 4, "category_id": 2, "bbox": [463.24688720703125, 444.5153503417969, 312.92388916015625, 275.4846496582031], "score": 0.29792535305023193}, {"image_id": 4, "category_id": 2, "bbox": [364.24554443359375, 461.3350830078125, 293.8868408203125, 258.6649169921875], "score": 0.1480303257703781}, {"image_id": 4, "category_id": 2, "bbox": [385.3401794433594, 331.0753173828125, 242.55795288085938, 379.322998046875], "score": 0.0936276912689209}, {"image_id": 4, "category_id": 2, "bbox": [459.2627868652344, 289.3208312988281, 289.4360656738281, 394.8551940917969], "score": 0.07238870859146118}, {"image_id": 4, "category_id": 2, "bbox": [313.253662109375, 407.3858947753906, 283.1151123046875, 312.6141052246094], "score": 0.07221681624650955}, {"image_id": 4, "category_id": 2, "bbox": [523.567138671875, 322.2514343261719, 276.173095703125, 397.7485656738281], "score": 0.07035089284181595}, {"image_id": 4, "category_id": 2, "bbox": [584.4622802734375, 478.4195556640625, 308.15478515625, 241.5804443359375], "score": 0.06847097724676132}, {"image_id": 4, "category_id": 2, "bbox": [656.828125, 444.02215576171875, 298.1566162109375, 275.97784423828125], "score": 0.06775818765163422}, {"image_id": 4, "category_id": 2, "bbox": 
[583.0355224609375, 336.70538330078125, 303.4783935546875, 373.43157958984375], "score": 0.051486872136592865}, {"image_id": 4, "category_id": 2, "bbox": [711.1127319335938, 482.1814270019531, 324.67645263671875, 237.81857299804688], "score": 0.04926062375307083}, {"image_id": 4, "category_id": 2, "bbox": [411.98577880859375, 256.82952880859375, 252.073974609375, 395.34869384765625], "score": 0.0462108850479126}, {"image_id": 4, "category_id": 2, "bbox": [773.3360595703125, 443.8004150390625, 328.9986572265625, 276.1995849609375], "score": 0.0451497957110405}, {"image_id": 4, "category_id": 2, "bbox": [719.861328125, 0.0, 317.6123046875, 409.96002197265625], "score": 0.0440484918653965}, {"image_id": 4, "category_id": 2, "bbox": [208.54037475585938, 440.58056640625, 321.6050109863281, 279.41943359375], "score": 0.043189119547605515}, {"image_id": 4, "category_id": 2, "bbox": [700.5787353515625, 0.0, 341.9344482421875, 235.17906188964844], "score": 0.040446508675813675}, {"image_id": 4, "category_id": 2, "bbox": [624.7281494140625, 0.0, 273.13323974609375, 403.5745849609375], "score": 0.04029224440455437}, {"image_id": 4, "category_id": 2, "bbox": [295.3443603515625, 288.13299560546875, 306.97314453125, 397.76007080078125], "score": 0.03998987749218941}, {"image_id": 4, "category_id": 2, "bbox": [484.077392578125, 548.5149536132812, 212.91571044921875, 171.48504638671875], "score": 0.03827877715229988}, {"image_id": 4, "category_id": 2, "bbox": [28.508636474609375, 82.23001861572266, 602.0848693847656, 620.9732284545898], "score": 0.03810979425907135}, {"image_id": 4, "category_id": 2, "bbox": [442.0533752441406, 605.1209106445312, 230.36288452148438, 114.87908935546875], "score": 0.037749651819467545}, {"image_id": 4, "category_id": 2, "bbox": [655.98486328125, 0.0, 301.8909912109375, 319.1214294433594], "score": 0.03736936300992966}, {"image_id": 4, "category_id": 2, "bbox": [717.9904174804688, 320.8404541015625, 300.32183837890625, 393.52630615234375], "score": 0.037196267396211624}, {"image_id": 4, "category_id": 2, "bbox": [496.1611328125, 0.0, 278.63360595703125, 311.3276672363281], "score": 0.037190891802310944}, {"image_id": 4, "category_id": 2, "bbox": [545.3327026367188, 216.5563201904297, 268.19549560546875, 399.95289611816406], "score": 0.03689523786306381}, {"image_id": 4, "category_id": 2, "bbox": [197.46287536621094, 280.0835266113281, 344.75514221191406, 408.9064025878906], "score": 0.03628276288509369}, {"image_id": 4, "category_id": 2, "bbox": [548.575439453125, 529.0494995117188, 211.4327392578125, 185.15338134765625], "score": 0.03601684793829918}, {"image_id": 4, "category_id": 2, "bbox": [171.52679443359375, 5.440498352050781, 274.6717529296875, 387.7549057006836], "score": 0.03592975065112114}, {"image_id": 4, "category_id": 2, "bbox": [280.3587646484375, 111.11443328857422, 283.84307861328125, 385.0529251098633], "score": 0.03460601344704628}, {"image_id": 4, "category_id": 2, "bbox": [650.0533447265625, 288.8979797363281, 309.92169189453125, 382.9500427246094], "score": 0.03460288420319557}, {"image_id": 4, "category_id": 2, "bbox": [761.6210327148438, 0.0, 348.99566650390625, 276.1425476074219], "score": 0.03439189866185188}, {"image_id": 4, "category_id": 2, "bbox": [830.5775756835938, 474.5707092285156, 361.39666748046875, 245.42929077148438], "score": 0.034287821501493454}, {"image_id": 4, "category_id": 2, "bbox": [85.76986694335938, 467.0090026855469, 275.3326110839844, 252.99099731445312], "score": 0.03414274752140045}, {"image_id": 4, "category_id": 2, 
"bbox": [606.1180419921875, 211.51255798339844, 281.71209716796875, 403.4058380126953], "score": 0.03412311524152756}, {"image_id": 4, "category_id": 2, "bbox": [721.7003173828125, 161.3732452392578, 317.63671875, 434.6811981201172], "score": 0.03411446884274483}, {"image_id": 4, "category_id": 2, "bbox": [426.6127624511719, 0.0, 268.6819763183594, 353.5249328613281], "score": 0.033841244876384735}, {"image_id": 4, "category_id": 2, "bbox": [0.0, 315.8871765136719, 676.14208984375, 404.1128234863281], "score": 0.03380811586976051}, {"image_id": 4, "category_id": 2, "bbox": [620.773681640625, 95.28936767578125, 269.607177734375, 414.3451843261719], "score": 0.03357353433966637}, {"image_id": 4, "category_id": 2, "bbox": [416.8959045410156, 545.7114868164062, 213.46914672851562, 174.28851318359375], "score": 0.0333537720143795}, {"image_id": 4, "category_id": 2, "bbox": [0.0, 454.4352111816406, 269.10546875, 265.5647888183594], "score": 0.03333422914147377}, {"image_id": 4, "category_id": 2, "bbox": [141.33465576171875, 436.7359313964844, 300.3509216308594, 283.2640686035156], "score": 0.03271167725324631}, {"image_id": 4, "category_id": 2, "bbox": [604.18994140625, 0.0, 278.5653076171875, 233.19337463378906], "score": 0.032535750418901443}, {"image_id": 4, "category_id": 2, "bbox": [112.44378662109375, 172.37081909179688, 657.9807739257812, 547.6291809082031], "score": 0.03237712383270264}, {"image_id": 4, "category_id": 3, "bbox": [459.2315673828125, 415.57305908203125, 314.5303955078125, 304.42694091796875], "score": 0.38534069061279297}, {"image_id": 4, "category_id": 3, "bbox": [393.27252197265625, 374.6574401855469, 300.2847900390625, 345.3425598144531], "score": 0.29448482394218445}, {"image_id": 4, "category_id": 3, "bbox": [388.5574951171875, 289.20819091796875, 237.38525390625, 393.68426513671875], "score": 0.12873044610023499}, {"image_id": 4, "category_id": 3, "bbox": [459.2627868652344, 289.3208312988281, 289.4360656738281, 394.8551940917969], "score": 0.0830373466014862}, {"image_id": 4, "category_id": 3, "bbox": [528.46923828125, 492.8611755371094, 259.59014892578125, 227.13882446289062], "score": 0.06494628638029099}, {"image_id": 4, "category_id": 3, "bbox": [364.0242004394531, 494.07611083984375, 291.1603698730469, 225.92388916015625], "score": 0.06362966448068619}, {"image_id": 4, "category_id": 3, "bbox": [276.2272033691406, 324.1025695800781, 597.7773742675781, 395.8974304199219], "score": 0.06347459554672241}, {"image_id": 4, "category_id": 3, "bbox": [317.84332275390625, 374.79364013671875, 278.62921142578125, 345.20635986328125], "score": 0.06173808127641678}, {"image_id": 4, "category_id": 3, "bbox": [523.567138671875, 322.2514343261719, 276.173095703125, 397.7485656738281], "score": 0.05953706055879593}, {"image_id": 4, "category_id": 3, "bbox": [171.52679443359375, 5.440498352050781, 274.6717529296875, 387.7549057006836], "score": 0.058683689683675766}, {"image_id": 4, "category_id": 3, "bbox": [98.95840454101562, 251.74676513671875, 689.3782653808594, 468.25323486328125], "score": 0.05765705555677414}, {"image_id": 4, "category_id": 3, "bbox": [416.8959045410156, 545.7114868164062, 213.46914672851562, 174.28851318359375], "score": 0.05479136109352112}, {"image_id": 4, "category_id": 3, "bbox": [609.8814697265625, 528.9462280273438, 213.66949462890625, 183.48907470703125], "score": 0.05177099630236626}, {"image_id": 4, "category_id": 3, "bbox": [517.541748046875, 546.0967407226562, 209.85333251953125, 173.90325927734375], "score": 0.051112495362758636}, 
{"image_id": 4, "category_id": 3, "bbox": [420.20208740234375, 440.87548828125, 211.8594970703125, 180.5850830078125], "score": 0.04968176782131195}, {"image_id": 4, "category_id": 3, "bbox": [421.3651428222656, 495.494873046875, 209.07089233398438, 178.97833251953125], "score": 0.04661921411752701}, {"image_id": 4, "category_id": 3, "bbox": [381.5361328125, 598.6461791992188, 225.24853515625, 121.35382080078125], "score": 0.04619172215461731}, {"image_id": 4, "category_id": 3, "bbox": [373.1715393066406, 239.75689697265625, 655.7983093261719, 480.24310302734375], "score": 0.04345839098095894}, {"image_id": 4, "category_id": 3, "bbox": [353.927978515625, 548.8246459960938, 213.527587890625, 171.17535400390625], "score": 0.043264687061309814}, {"image_id": 4, "category_id": 3, "bbox": [320.9720458984375, 499.8532409667969, 213.4853515625, 172.79281616210938], "score": 0.04196581244468689}, {"image_id": 4, "category_id": 3, "bbox": [580.048095703125, 564.3116455078125, 212.54022216796875, 155.6883544921875], "score": 0.04105975851416588}, {"image_id": 4, "category_id": 3, "bbox": [834.9589233398438, 335.39703369140625, 213.86871337890625, 173.6868896484375], "score": 0.03943229839205742}, {"image_id": 4, "category_id": 3, "bbox": [480.92791748046875, 499.3553771972656, 212.8758544921875, 171.00253295898438], "score": 0.039415087550878525}, {"image_id": 4, "category_id": 3, "bbox": [353.80291748046875, 443.3209228515625, 216.3509521484375, 176.776123046875], "score": 0.0391753688454628}, {"image_id": 4, "category_id": 3, "bbox": [583.0355224609375, 336.70538330078125, 303.4783935546875, 373.43157958984375], "score": 0.03910498693585396}, {"image_id": 4, "category_id": 3, "bbox": [485.42803955078125, 444.6786193847656, 212.18157958984375, 172.91091918945312], "score": 0.03897371515631676}, {"image_id": 4, "category_id": 3, "bbox": [581.7996826171875, 498.9322204589844, 211.00421142578125, 172.51522827148438], "score": 0.03828175365924835}, {"image_id": 4, "category_id": 3, "bbox": [442.0533752441406, 605.1209106445312, 230.36288452148438, 114.87908935546875], "score": 0.03827302157878876}, {"image_id": 4, "category_id": 3, "bbox": [544.7129516601562, 603.0615234375, 223.16644287109375, 116.9384765625], "score": 0.037654828280210495}, {"image_id": 4, "category_id": 3, "bbox": [586.066162109375, 14.718589782714844, 556.770263671875, 633.8289566040039], "score": 0.03760984539985657}, {"image_id": 4, "category_id": 3, "bbox": [289.412109375, 549.0254516601562, 214.78070068359375, 170.97454833984375], "score": 0.036792874336242676}, {"image_id": 4, "category_id": 3, "bbox": [897.6357421875, 333.78009033203125, 214.9720458984375, 175.44891357421875], "score": 0.03628296032547951}, {"image_id": 4, "category_id": 3, "bbox": [452.0950012207031, 410.3497314453125, 212.66842651367188, 170.955810546875], "score": 0.036265309900045395}, {"image_id": 4, "category_id": 3, "bbox": [386.1021728515625, 409.87908935546875, 215.80743408203125, 171.2523193359375], "score": 0.03535580262541771}, {"image_id": 4, "category_id": 3, "bbox": [547.2571411132812, 462.59527587890625, 212.1639404296875, 171.005615234375], "score": 0.03344094753265381}, {"image_id": 4, "category_id": 3, "bbox": [28.508636474609375, 82.23001861572266, 602.0848693847656, 620.9732284545898], "score": 0.03335680440068245}, {"image_id": 4, "category_id": 3, "bbox": [963.8678588867188, 317.1954040527344, 212.76885986328125, 174.15679931640625], "score": 0.03296693414449692}, {"image_id": 4, "category_id": 3, "bbox": [611.0526123046875, 
608.614990234375, 220.71588134765625, 111.385009765625], "score": 0.03240879252552986}, {"image_id": 5, "category_id": 1, "bbox": [963.938232421875, 233.01246643066406, 378.40966796875, 536.3555755615234], "score": 0.36887314915657043}, {"image_id": 5, "category_id": 1, "bbox": [4.5380859375, 85.4964828491211, 984.091552734375, 952.5992202758789], "score": 0.0984569638967514}, {"image_id": 5, "category_id": 1, "bbox": [199.5319061279297, 60.394866943359375, 388.78639221191406, 562.7030944824219], "score": 0.08474069088697433}, {"image_id": 5, "category_id": 1, "bbox": [849.1188354492188, 190.40716552734375, 439.66693115234375, 537.6932373046875], "score": 0.08252069354057312}, {"image_id": 5, "category_id": 1, "bbox": [945.1074829101562, 0.0, 412.38677978515625, 612.0238647460938], "score": 0.07950934767723083}, {"image_id": 5, "category_id": 1, "bbox": [1679.1453857421875, 0.0, 240.8546142578125, 222.76719665527344], "score": 0.06727643311023712}, {"image_id": 5, "category_id": 1, "bbox": [328.2125244140625, 125.40141296386719, 1065.3607177734375, 925.1208038330078], "score": 0.06474706530570984}, {"image_id": 5, "category_id": 1, "bbox": [0.0, 461.1424255371094, 1005.0798950195312, 618.8575744628906], "score": 0.05938892066478729}, {"image_id": 5, "category_id": 1, "bbox": [925.4653930664062, 358.2093811035156, 402.77838134765625, 613.5608215332031], "score": 0.05283917114138603}, {"image_id": 5, "category_id": 1, "bbox": [525.0779418945312, 16.17827796936035, 1062.0364379882812, 955.5028133392334], "score": 0.05164412036538124}, {"image_id": 5, "category_id": 1, "bbox": [852.7999877929688, 199.11953735351562, 818.5951538085938, 880.8804626464844], "score": 0.04837699607014656}, {"image_id": 5, "category_id": 1, "bbox": [836.856689453125, 0.0, 885.6988525390625, 868.0389404296875], "score": 0.04465340077877045}, {"image_id": 5, "category_id": 1, "bbox": [269.39129638671875, 456.9488220214844, 1141.6948852539062, 623.0511779785156], "score": 0.03814760968089104}, {"image_id": 5, "category_id": 1, "bbox": [0.0, 129.7044219970703, 601.8995361328125, 906.4448699951172], "score": 0.03804462030529976}, {"image_id": 5, "category_id": 1, "bbox": [804.1773071289062, 0.0, 484.08758544921875, 539.6320190429688], "score": 0.03566613420844078}, {"image_id": 5, "category_id": 1, "bbox": [0.0, 481.0958557128906, 346.5749816894531, 598.7986755371094], "score": 0.03265869989991188}, {"image_id": 5, "category_id": 1, "bbox": [594.76708984375, 0.0, 936.7724609375, 485.5172119140625], "score": 0.030717572197318077}, {"image_id": 5, "category_id": 2, "bbox": [964.4942016601562, 189.57858276367188, 383.59674072265625, 539.5215759277344], "score": 0.2539811134338379}, {"image_id": 5, "category_id": 2, "bbox": [850.6994018554688, 234.37591552734375, 434.60882568359375, 535.031494140625], "score": 0.12629173696041107}, {"image_id": 5, "category_id": 2, "bbox": [925.4653930664062, 358.2093811035156, 402.77838134765625, 613.5608215332031], "score": 0.07573205232620239}, {"image_id": 5, "category_id": 2, "bbox": [1627.166015625, 0.0, 292.833984375, 216.1064453125], "score": 0.07191196829080582}, {"image_id": 5, "category_id": 2, "bbox": [981.9532470703125, 419.2640686035156, 477.4996337890625, 602.1694641113281], "score": 0.06688620895147324}, {"image_id": 5, "category_id": 2, "bbox": [892.8466796875, 534.0514526367188, 465.4395751953125, 545.9485473632812], "score": 0.06147313490509987}, {"image_id": 5, "category_id": 2, "bbox": [836.496826171875, 55.360382080078125, 445.3927001953125, 609.2064514160156], "score": 
0.05664169788360596}, {"image_id": 5, "category_id": 2, "bbox": [1075.5103759765625, 461.5849609375, 492.5906982421875, 613.8486328125], "score": 0.05623174086213112}, {"image_id": 5, "category_id": 2, "bbox": [832.3295288085938, 440.1358337402344, 448.12908935546875, 573.0678405761719], "score": 0.05583732947707176}, {"image_id": 5, "category_id": 2, "bbox": [1094.268798828125, 142.49664306640625, 447.796875, 646.52978515625], "score": 0.05311533808708191}, {"image_id": 5, "category_id": 2, "bbox": [551.4125366210938, 179.68995666503906, 1001.8822631835938, 900.3100433349609], "score": 0.05042977258563042}, {"image_id": 5, "category_id": 2, "bbox": [945.1074829101562, 0.0, 412.38677978515625, 612.0238647460938], "score": 0.04929882660508156}, {"image_id": 5, "category_id": 2, "bbox": [1086.3311767578125, 5.117500305175781, 401.651123046875, 601.986442565918], "score": 0.04431271553039551}, {"image_id": 5, "category_id": 2, "bbox": [839.578125, 316.3777160644531, 854.673828125, 763.6222839355469], "score": 0.04303337633609772}, {"image_id": 5, "category_id": 2, "bbox": [1720.2073974609375, 0.0, 199.7926025390625, 183.08116149902344], "score": 0.04298314079642296}, {"image_id": 5, "category_id": 2, "bbox": [729.3356323242188, 123.33980560302734, 437.70892333984375, 661.0444717407227], "score": 0.04227873310446739}, {"image_id": 5, "category_id": 2, "bbox": [790.2385864257812, 689.5328369140625, 516.7190551757812, 390.4671630859375], "score": 0.041595570743083954}, {"image_id": 5, "category_id": 2, "bbox": [688.1409301757812, 541.0933837890625, 503.15155029296875, 538.9066162109375], "score": 0.041065916419029236}, {"image_id": 5, "category_id": 2, "bbox": [986.6705932617188, 638.8688354492188, 470.74310302734375, 441.13116455078125], "score": 0.0401003435254097}, {"image_id": 5, "category_id": 2, "bbox": [591.451171875, 684.6607666015625, 519.3575439453125, 395.3392333984375], "score": 0.03966795653104782}, {"image_id": 5, "category_id": 2, "bbox": [1190.732666015625, 124.09147644042969, 435.7515869140625, 576.9820098876953], "score": 0.039484381675720215}, {"image_id": 5, "category_id": 2, "bbox": [196.65985107421875, 22.55493927001953, 396.87432861328125, 568.7380905151367], "score": 0.03839555382728577}, {"image_id": 5, "category_id": 2, "bbox": [180.57652282714844, 121.62328338623047, 374.33229064941406, 627.2893753051758], "score": 0.037613555788993835}, {"image_id": 5, "category_id": 2, "bbox": [16.191009521484375, 0.0, 470.7978515625, 381.8108215332031], "score": 0.03662721812725067}, {"image_id": 5, "category_id": 2, "bbox": [126.4090576171875, 89.85628509521484, 1058.009765625, 943.7529678344727], "score": 0.03633274883031845}, {"image_id": 5, "category_id": 2, "bbox": [725.4708251953125, 384.8214111328125, 463.244873046875, 571.9849243164062], "score": 0.036165591329336166}, {"image_id": 5, "category_id": 2, "bbox": [120.7701416015625, 0.0, 501.6590576171875, 352.70391845703125], "score": 0.035838425159454346}, {"image_id": 5, "category_id": 2, "bbox": [1015.138916015625, 250.60003662109375, 319.366943359375, 279.38946533203125], "score": 0.03547172620892525}, {"image_id": 5, "category_id": 2, "bbox": [224.23095703125, 193.9009552001953, 426.583984375, 607.6521453857422], "score": 0.035288255661726}, {"image_id": 5, "category_id": 2, "bbox": [1303.807373046875, 646.8085327148438, 470.7744140625, 433.19146728515625], "score": 0.034874532371759415}, {"image_id": 5, "category_id": 2, "bbox": [977.6273803710938, 452.032470703125, 942.3726196289062, 627.967529296875], "score": 
0.03468426689505577}, {"image_id": 5, "category_id": 2, "bbox": [1457.838623046875, 603.6132202148438, 455.9486083984375, 476.38677978515625], "score": 0.03441321849822998}, {"image_id": 5, "category_id": 2, "bbox": [306.0850524902344, 0.0, 446.6165466308594, 575.2744750976562], "score": 0.034330084919929504}, {"image_id": 5, "category_id": 2, "bbox": [1753.9102783203125, 0.0, 166.0897216796875, 281.0068664550781], "score": 0.03428940102458}, {"image_id": 5, "category_id": 2, "bbox": [99.05097198486328, 577.4408569335938, 492.24681854248047, 502.55914306640625], "score": 0.03417115658521652}, {"image_id": 5, "category_id": 2, "bbox": [373.6672058105469, 0.0, 1027.3126525878906, 486.6355895996094], "score": 0.033685971051454544}, {"image_id": 5, "category_id": 2, "bbox": [1078.774169921875, 304.4322814941406, 432.269287109375, 606.9153747558594], "score": 0.0335426889359951}, {"image_id": 5, "category_id": 2, "bbox": [1217.0350341796875, 522.5675048828125, 394.6165771484375, 557.4324951171875], "score": 0.03285492956638336}, {"image_id": 5, "category_id": 2, "bbox": [663.28515625, 762.4114990234375, 552.38623046875, 317.5885009765625], "score": 0.03228652849793434}, {"image_id": 5, "category_id": 2, "bbox": [1374.1668701171875, 0.0, 498.3873291015625, 484.792724609375], "score": 0.03200570493936539}, {"image_id": 5, "category_id": 2, "bbox": [398.489501953125, 0.0, 472.421875, 538.785888671875], "score": 0.03170382231473923}, {"image_id": 5, "category_id": 2, "bbox": [0.0, 107.42440032958984, 788.7800903320312, 929.7892227172852], "score": 0.0311779472976923}, {"image_id": 5, "category_id": 2, "bbox": [1291.4600830078125, 72.68331146240234, 405.6448974609375, 572.5352554321289], "score": 0.031125115230679512}, {"image_id": 5, "category_id": 2, "bbox": [918.5374145507812, 252.79957580566406, 317.66107177734375, 276.73582458496094], "score": 0.03107491508126259}, {"image_id": 5, "category_id": 2, "bbox": [970.216552734375, 205.97080993652344, 313.430419921875, 260.76707458496094], "score": 0.03087024763226509}, {"image_id": 5, "category_id": 2, "bbox": [1193.930908203125, 344.94781494140625, 437.0447998046875, 642.976318359375], "score": 0.030766673386096954}, {"image_id": 5, "category_id": 2, "bbox": [294.4600524902344, 131.92250061035156, 460.5191955566406, 604.9305877685547], "score": 0.03049168922007084}, {"image_id": 5, "category_id": 2, "bbox": [1561.160888671875, 0.0, 358.839111328125, 342.0604248046875], "score": 0.030460039153695107}, {"image_id": 5, "category_id": 3, "bbox": [964.4942016601562, 189.57858276367188, 383.59674072265625, 539.5215759277344], "score": 0.6594330072402954}, {"image_id": 5, "category_id": 3, "bbox": [850.6994018554688, 234.37591552734375, 434.60882568359375, 535.031494140625], "score": 0.2742394804954529}, {"image_id": 5, "category_id": 3, "bbox": [1678.1170654296875, 0.0, 241.8829345703125, 203.9327392578125], "score": 0.09158416092395782}, {"image_id": 5, "category_id": 3, "bbox": [199.5319061279297, 60.394866943359375, 388.78639221191406, 562.7030944824219], "score": 0.08573468029499054}, {"image_id": 5, "category_id": 3, "bbox": [925.4653930664062, 358.2093811035156, 402.77838134765625, 613.5608215332031], "score": 0.08406245708465576}, {"image_id": 5, "category_id": 3, "bbox": [1002.5576171875, 0.0, 371.56640625, 606.13330078125], "score": 0.07182856649160385}, {"image_id": 5, "category_id": 3, "bbox": [126.4090576171875, 89.85628509521484, 1058.009765625, 943.7529678344727], "score": 0.056805241852998734}, {"image_id": 5, "category_id": 3, "bbox": 
[1019.1641235351562, 277.0384521484375, 314.29779052734375, 278.63006591796875], "score": 0.053454555571079254}, {"image_id": 5, "category_id": 3, "bbox": [836.496826171875, 55.360382080078125, 445.3927001953125, 609.2064514160156], "score": 0.048420172184705734}, {"image_id": 5, "category_id": 3, "bbox": [1705.8238525390625, 14.663670539855957, 214.1761474609375, 274.8979444503784], "score": 0.047605011612176895}, {"image_id": 5, "category_id": 3, "bbox": [918.5374145507812, 252.79957580566406, 317.66107177734375, 276.73582458496094], "score": 0.04616464301943779}, {"image_id": 5, "category_id": 3, "bbox": [0.0, 343.3861083984375, 983.4697875976562, 736.6138916015625], "score": 0.045841116458177567}, {"image_id": 5, "category_id": 3, "bbox": [965.4124145507812, 499.0762634277344, 318.77008056640625, 271.6882019042969], "score": 0.04504524543881416}, {"image_id": 5, "category_id": 3, "bbox": [968.5408325195312, 392.0644836425781, 317.89410400390625, 267.6421203613281], "score": 0.04255703091621399}, {"image_id": 5, "category_id": 3, "bbox": [866.0108642578125, 498.1739501953125, 323.8389892578125, 272.00518798828125], "score": 0.040775831788778305}, {"image_id": 5, "category_id": 3, "bbox": [916.8867797851562, 449.22857666015625, 323.19256591796875, 262.79571533203125], "score": 0.040395185351371765}, {"image_id": 5, "category_id": 3, "bbox": [1012.4703979492188, 448.9977722167969, 324.78216552734375, 262.7613830566406], "score": 0.039489082992076874}, {"image_id": 5, "category_id": 3, "bbox": [1062.223876953125, 231.535888671875, 317.921875, 265.13397216796875], "score": 0.038920801132917404}, {"image_id": 5, "category_id": 3, "bbox": [868.7488403320312, 393.4620056152344, 323.11541748046875, 265.2958068847656], "score": 0.03691669553518295}, {"image_id": 5, "category_id": 3, "bbox": [1110.618408203125, 285.7287292480469, 320.436279296875, 265.6927185058594], "score": 0.036816783249378204}, {"image_id": 5, "category_id": 3, "bbox": [970.216552734375, 205.97080993652344, 313.430419921875, 260.76707458496094], "score": 0.03649832680821419}, {"image_id": 5, "category_id": 3, "bbox": [1059.676513671875, 336.45654296875, 320.9581298828125, 271.743408203125], "score": 0.036247413605451584}, {"image_id": 5, "category_id": 3, "bbox": [923.6506958007812, 339.0591735839844, 322.67730712890625, 269.3127746582031], "score": 0.036209579557180405}, {"image_id": 5, "category_id": 3, "bbox": [1157.6790771484375, 233.13815307617188, 320.8616943359375, 261.1270751953125], "score": 0.0360611267387867}, {"image_id": 5, "category_id": 3, "bbox": [323.296142578125, 203.8575439453125, 1053.13623046875, 876.1424560546875], "score": 0.034500110894441605}, {"image_id": 5, "category_id": 3, "bbox": [873.3296508789062, 313.27581787109375, 314.24517822265625, 263.5931396484375], "score": 0.03431421518325806}, {"image_id": 5, "category_id": 3, "bbox": [1078.36865234375, 62.29108810424805, 426.806396484375, 602.904224395752], "score": 0.034006573259830475}, {"image_id": 5, "category_id": 3, "bbox": [1155.3427734375, 339.1759033203125, 320.3338623046875, 263.83453369140625], "score": 0.03386393561959267}, {"image_id": 5, "category_id": 3, "bbox": [1053.3046875, 501.88055419921875, 327.778564453125, 265.1337890625], "score": 0.033198725432157516}, {"image_id": 5, "category_id": 3, "bbox": [912.8214721679688, 638.0339965820312, 328.01470947265625, 261.42120361328125], "score": 0.032790012657642365}, {"image_id": 5, "category_id": 3, "bbox": [1102.849365234375, 447.09027099609375, 326.419921875, 266.15057373046875], 
"score": 0.03268580883741379}, {"image_id": 5, "category_id": 3, "bbox": [816.3561401367188, 449.74176025390625, 326.88531494140625, 260.75927734375], "score": 0.03140271455049515}, {"image_id": 5, "category_id": 3, "bbox": [1059.31591796875, 154.44131469726562, 319.5966796875, 256.9745178222656], "score": 0.03138384968042374}, {"image_id": 5, "category_id": 3, "bbox": [525.0779418945312, 16.17827796936035, 1062.0364379882812, 955.5028133392334], "score": 0.03127110004425049}, {"image_id": 5, "category_id": 3, "bbox": [820.9056396484375, 257.2904357910156, 320.345703125, 265.1499328613281], "score": 0.031157314777374268}, {"image_id": 6, "category_id": 1, "bbox": [720.9788818359375, 370.5569763183594, 246.3680419921875, 349.4430236816406], "score": 0.7641261219978333}, {"image_id": 6, "category_id": 1, "bbox": [523.7584228515625, 256.5231628417969, 261.34075927734375, 419.3395080566406], "score": 0.6450764536857605}, {"image_id": 6, "category_id": 1, "bbox": [427.97137451171875, 250.76568603515625, 285.456298828125, 423.21575927734375], "score": 0.3257957696914673}, {"image_id": 6, "category_id": 1, "bbox": [644.2816162109375, 387.2144470214844, 277.70269775390625, 332.7855529785156], "score": 0.24539099633693695}, {"image_id": 6, "category_id": 1, "bbox": [583.3834838867188, 254.33953857421875, 291.35101318359375, 415.74365234375], "score": 0.20962277054786682}, {"image_id": 6, "category_id": 1, "bbox": [732.5083618164062, 246.4562530517578, 253.30316162109375, 427.5559539794922], "score": 0.20480124652385712}, {"image_id": 6, "category_id": 1, "bbox": [561.6874389648438, 188.48182678222656, 219.345458984375, 399.24046325683594], "score": 0.20084549486637115}, {"image_id": 6, "category_id": 1, "bbox": [283.37530517578125, 10.594184875488281, 577.5358276367188, 667.4562911987305], "score": 0.16656890511512756}, {"image_id": 6, "category_id": 1, "bbox": [878.7666015625, 169.1527099609375, 264.3162841796875, 413.601318359375], "score": 0.1643669456243515}, {"image_id": 6, "category_id": 1, "bbox": [476.1641540527344, 346.0792236328125, 284.9002380371094, 373.9207763671875], "score": 0.1570691019296646}, {"image_id": 6, "category_id": 1, "bbox": [466.82080078125, 143.6969451904297, 317.2979736328125, 394.5503692626953], "score": 0.1508159190416336}, {"image_id": 6, "category_id": 1, "bbox": [390.69390869140625, 228.4336395263672, 231.9134521484375, 397.76295471191406], "score": 0.12375758588314056}, {"image_id": 6, "category_id": 1, "bbox": [407.029541015625, 80.96017456054688, 566.8671875, 630.0451965332031], "score": 0.12236762046813965}, {"image_id": 6, "category_id": 1, "bbox": [788.2811279296875, 334.2135009765625, 257.9755859375, 384.63916015625], "score": 0.10899021476507187}, {"image_id": 6, "category_id": 1, "bbox": [838.644287109375, 176.7568359375, 223.5047607421875, 402.03228759765625], "score": 0.0935923159122467}, {"image_id": 6, "category_id": 1, "bbox": [0.0, 13.249168395996094, 645.135498046875, 658.0433120727539], "score": 0.08977632969617844}, {"image_id": 6, "category_id": 1, "bbox": [1066.8819580078125, 0.4797248840332031, 213.1180419921875, 402.6886100769043], "score": 0.08265255391597748}, {"image_id": 6, "category_id": 1, "bbox": [380.2679443359375, 149.12496948242188, 264.1180419921875, 384.5682067871094], "score": 0.08253179490566254}, {"image_id": 6, "category_id": 1, "bbox": [374.1619873046875, 322.0678405761719, 292.58001708984375, 390.7514343261719], "score": 0.07784484326839447}, {"image_id": 6, "category_id": 1, "bbox": [918.847900390625, 241.1309814453125, 
250.914306640625, 407.61541748046875], "score": 0.0713098868727684}, {"image_id": 6, "category_id": 1, "bbox": [139.26727294921875, 90.86822509765625, 641.15869140625, 624.2081909179688], "score": 0.07065185904502869}, {"image_id": 6, "category_id": 1, "bbox": [671.7904663085938, 123.68303680419922, 608.2095336914062, 590.1875686645508], "score": 0.07017822563648224}, {"image_id": 6, "category_id": 1, "bbox": [462.91522216796875, 170.22604370117188, 703.2243041992188, 549.7739562988281], "score": 0.06965098530054092}, {"image_id": 6, "category_id": 1, "bbox": [307.3369140625, 246.39854431152344, 535.6517944335938, 473.60145568847656], "score": 0.057650912553071976}, {"image_id": 6, "category_id": 1, "bbox": [441.63226318359375, 428.74267578125, 254.866455078125, 291.25732421875], "score": 0.05508614331483841}, {"image_id": 6, "category_id": 1, "bbox": [419.5931396484375, 66.94788360595703, 309.95623779296875, 413.67589569091797], "score": 0.05441202595829964}, {"image_id": 6, "category_id": 1, "bbox": [492.3226623535156, 0.0, 713.6314392089844, 581.7380981445312], "score": 0.05152516812086105}, {"image_id": 6, "category_id": 1, "bbox": [820.8189086914062, 44.52212905883789, 459.18109130859375, 583.6979026794434], "score": 0.05112265795469284}, {"image_id": 6, "category_id": 1, "bbox": [0.0, 300.68133544921875, 281.62872314453125, 419.31866455078125], "score": 0.049239568412303925}, {"image_id": 6, "category_id": 1, "bbox": [590.8546142578125, 149.18600463867188, 281.187744140625, 395.6278381347656], "score": 0.046431127935647964}, {"image_id": 6, "category_id": 1, "bbox": [537.57470703125, 74.0362777709961, 260.8310546875, 399.4068374633789], "score": 0.04632844030857086}, {"image_id": 6, "category_id": 1, "bbox": [169.17776489257812, 41.848880767822266, 268.6673583984375, 367.65624618530273], "score": 0.0451013557612896}, {"image_id": 6, "category_id": 2, "bbox": [427.5109558105469, 307.82025146484375, 300.4128112792969, 408.2813720703125], "score": 0.15348239243030548}, {"image_id": 6, "category_id": 2, "bbox": [372.53546142578125, 368.7740478515625, 286.97430419921875, 351.2259521484375], "score": 0.116594597697258}, {"image_id": 6, "category_id": 2, "bbox": [523.06884765625, 275.1982116699219, 261.3258056640625, 430.2593078613281], "score": 0.09982141107320786}, {"image_id": 6, "category_id": 2, "bbox": [720.1588745117188, 340.6453552246094, 248.97918701171875, 371.7548522949219], "score": 0.09673894196748734}, {"image_id": 6, "category_id": 2, "bbox": [385.7091064453125, 251.12208557128906, 254.6424560546875, 422.0002899169922], "score": 0.08893060684204102}, {"image_id": 6, "category_id": 2, "bbox": [441.63226318359375, 428.74267578125, 254.866455078125, 291.25732421875], "score": 0.08636680990457535}, {"image_id": 6, "category_id": 2, "bbox": [297.5578918457031, 317.5335693359375, 284.1014709472656, 398.00445556640625], "score": 0.07556338608264923}, {"image_id": 6, "category_id": 2, "bbox": [433.64691162109375, 230.18014526367188, 261.87744140625, 392.4980163574219], "score": 0.07122409343719482}, {"image_id": 6, "category_id": 2, "bbox": [644.2816162109375, 387.2144470214844, 277.70269775390625, 332.7855529785156], "score": 0.07004447281360626}, {"image_id": 6, "category_id": 2, "bbox": [788.2811279296875, 334.2135009765625, 257.9755859375, 384.63916015625], "score": 0.06478866189718246}, {"image_id": 6, "category_id": 2, "bbox": [878.7666015625, 169.1527099609375, 264.3162841796875, 413.601318359375], "score": 0.06240314245223999}, {"image_id": 6, "category_id": 2, "bbox": 
[918.847900390625, 241.1309814453125, 250.914306640625, 407.61541748046875], "score": 0.06226561591029167}, {"image_id": 6, "category_id": 2, "bbox": [208.18753051757812, 323.3338928222656, 313.1707458496094, 389.2586364746094], "score": 0.05980860814452171}, {"image_id": 6, "category_id": 2, "bbox": [424.4762878417969, 146.11839294433594, 299.1690368652344, 385.92909240722656], "score": 0.05606542155146599}, {"image_id": 6, "category_id": 2, "bbox": [835.6898803710938, 145.2556610107422, 246.58868408203125, 405.76271057128906], "score": 0.052976176142692566}, {"image_id": 6, "category_id": 2, "bbox": [317.8974609375, 218.14646911621094, 265.4510498046875, 397.53956604003906], "score": 0.05207947641611099}, {"image_id": 6, "category_id": 2, "bbox": [139.26727294921875, 90.86822509765625, 641.15869140625, 624.2081909179688], "score": 0.04695218801498413}, {"image_id": 6, "category_id": 2, "bbox": [561.6874389648438, 188.48182678222656, 219.345458984375, 399.24046325683594], "score": 0.04631302133202553}, {"image_id": 6, "category_id": 2, "bbox": [378.5179443359375, 112.9122085571289, 272.03399658203125, 391.3154525756836], "score": 0.04609045386314392}, {"image_id": 6, "category_id": 2, "bbox": [866.2034301757812, 298.10211181640625, 268.33013916015625, 372.35162353515625], "score": 0.0458943173289299}, {"image_id": 6, "category_id": 2, "bbox": [733.36328125, 461.1555480957031, 272.66778564453125, 258.8444519042969], "score": 0.04520067200064659}, {"image_id": 6, "category_id": 2, "bbox": [918.3129272460938, 125.32978057861328, 258.50286865234375, 379.0383834838867], "score": 0.044894006103277206}, {"image_id": 6, "category_id": 2, "bbox": [130.38897705078125, 392.0223083496094, 351.38433837890625, 327.9776916503906], "score": 0.044658761471509933}, {"image_id": 6, "category_id": 2, "bbox": [875.3162231445312, 84.0682373046875, 258.06610107421875, 377.45330810546875], "score": 0.04439789801836014}, {"image_id": 6, "category_id": 2, "bbox": [531.8588256835938, 424.4071960449219, 270.03802490234375, 295.5928039550781], "score": 0.044106438755989075}, {"image_id": 6, "category_id": 2, "bbox": [65.30635070800781, 0.0, 346.57273864746094, 276.31640625], "score": 0.04342801496386528}, {"image_id": 6, "category_id": 2, "bbox": [136.4152374267578, 240.4580535888672, 331.3274688720703, 424.5629425048828], "score": 0.0433354414999485}, {"image_id": 6, "category_id": 2, "bbox": [972.1996459960938, 197.76724243164062, 289.83563232421875, 423.7947082519531], "score": 0.042921505868434906}, {"image_id": 6, "category_id": 2, "bbox": [922.26318359375, 0.0, 305.8455810546875, 403.6990661621094], "score": 0.04288630932569504}, {"image_id": 6, "category_id": 2, "bbox": [739.4329223632812, 211.04873657226562, 253.01788330078125, 413.1273498535156], "score": 0.042710598558187485}, {"image_id": 6, "category_id": 2, "bbox": [415.62030029296875, 22.812732696533203, 293.5704345703125, 430.4707145690918], "score": 0.04270061478018761}, {"image_id": 6, "category_id": 2, "bbox": [214.23646545410156, 99.20015716552734, 290.8599090576172, 407.0506057739258], "score": 0.04255998879671097}, {"image_id": 6, "category_id": 2, "bbox": [291.56689453125, 110.5218276977539, 288.984619140625, 391.6353988647461], "score": 0.04212484881281853}, {"image_id": 6, "category_id": 2, "bbox": [217.59121704101562, 219.07424926757812, 312.4834289550781, 396.1279602050781], "score": 0.041725825518369675}, {"image_id": 6, "category_id": 3, "bbox": [433.2281799316406, 362.6020202636719, 280.0628356933594, 357.3979797363281], "score": 
0.41693389415740967}, {"image_id": 6, "category_id": 3, "bbox": [501.8067626953125, 293.2696533203125, 278.31768798828125, 426.7303466796875], "score": 0.25722169876098633}, {"image_id": 6, "category_id": 3, "bbox": [371.3837890625, 407.3714904785156, 283.28790283203125, 312.6285095214844], "score": 0.19768662750720978}, {"image_id": 6, "category_id": 3, "bbox": [918.847900390625, 241.1309814453125, 250.914306640625, 407.61541748046875], "score": 0.15092028677463531}, {"image_id": 6, "category_id": 3, "bbox": [733.44677734375, 339.69818115234375, 241.42529296875, 373.4830322265625], "score": 0.14349032938480377}, {"image_id": 6, "category_id": 3, "bbox": [314.3943786621094, 88.0880355834961, 534.5744323730469, 626.8357315063477], "score": 0.14206820726394653}, {"image_id": 6, "category_id": 3, "bbox": [879.5408935546875, 147.14418029785156, 260.4053955078125, 405.1414031982422], "score": 0.1332140862941742}, {"image_id": 6, "category_id": 3, "bbox": [385.7091064453125, 251.12208557128906, 254.6424560546875, 422.0002899169922], "score": 0.10912498831748962}, {"image_id": 6, "category_id": 3, "bbox": [386.55126953125, 233.67933654785156, 582.17724609375, 486.32066345214844], "score": 0.10836091637611389}, {"image_id": 6, "category_id": 3, "bbox": [462.91522216796875, 170.22604370117188, 703.2243041992188, 549.7739562988281], "score": 0.0954907238483429}, {"image_id": 6, "category_id": 3, "bbox": [561.0498657226562, 227.4679412841797, 217.21875, 408.93116760253906], "score": 0.0893741026520729}, {"image_id": 6, "category_id": 3, "bbox": [374.5230712890625, 9.267276763916016, 636.606201171875, 662.766658782959], "score": 0.08481226116418839}, {"image_id": 6, "category_id": 3, "bbox": [433.64691162109375, 230.18014526367188, 261.87744140625, 392.4980163574219], "score": 0.07929591834545135}, {"image_id": 6, "category_id": 3, "bbox": [424.4762878417969, 146.11839294433594, 299.1690368652344, 385.92909240722656], "score": 0.07340463995933533}, {"image_id": 6, "category_id": 3, "bbox": [838.5061645507812, 216.0487823486328, 206.11370849609375, 398.32359313964844], "score": 0.06982134282588959}, {"image_id": 6, "category_id": 3, "bbox": [84.489990234375, 11.848548889160156, 704.285888671875, 655.4718856811523], "score": 0.06689397990703583}, {"image_id": 6, "category_id": 3, "bbox": [531.8588256835938, 424.4071960449219, 270.03802490234375, 295.5928039550781], "score": 0.06408730894327164}, {"image_id": 6, "category_id": 3, "bbox": [644.2816162109375, 387.2144470214844, 277.70269775390625, 332.7855529785156], "score": 0.06312553584575653}, {"image_id": 6, "category_id": 3, "bbox": [169.17776489257812, 41.848880767822266, 268.6673583984375, 367.65624618530273], "score": 0.06122942268848419}, {"image_id": 6, "category_id": 3, "bbox": [866.2034301757812, 298.10211181640625, 268.33013916015625, 372.35162353515625], "score": 0.06106466054916382}, {"image_id": 6, "category_id": 3, "bbox": [125.578125, 244.1772918701172, 645.34423828125, 475.8227081298828], "score": 0.05855504423379898}, {"image_id": 6, "category_id": 3, "bbox": [452.69049072265625, 457.8127136230469, 211.655517578125, 181.10324096679688], "score": 0.05787421390414238}, {"image_id": 6, "category_id": 3, "bbox": [973.236572265625, 236.33238220214844, 285.948486328125, 412.5506134033203], "score": 0.05285995826125145}, {"image_id": 6, "category_id": 3, "bbox": [512.7431640625, 459.04376220703125, 213.51739501953125, 179.37567138671875], "score": 0.05088053643703461}, {"image_id": 6, "category_id": 3, "bbox": [739.4329223632812, 
211.04873657226562, 253.01788330078125, 413.1273498535156], "score": 0.050243888050317764}, {"image_id": 6, "category_id": 3, "bbox": [482.4781494140625, 421.3636474609375, 213.799560546875, 183.13177490234375], "score": 0.04952119663357735}, {"image_id": 6, "category_id": 3, "bbox": [583.3834838867188, 254.33953857421875, 291.35101318359375, 415.74365234375], "score": 0.0489606112241745}, {"image_id": 6, "category_id": 3, "bbox": [578.3986206054688, 278.63531494140625, 213.7393798828125, 181.76132202148438], "score": 0.047060586512088776}, {"image_id": 6, "category_id": 3, "bbox": [834.0794677734375, 118.098388671875, 251.09423828125, 391.6617126464844], "score": 0.045498546212911606}, {"image_id": 6, "category_id": 3, "bbox": [482.2995300292969, 493.7231750488281, 213.36587524414062, 180.84353637695312], "score": 0.04495788365602493}, {"image_id": 6, "category_id": 3, "bbox": [788.2811279296875, 334.2135009765625, 257.9755859375, 384.63916015625], "score": 0.04478335753083229}, {"image_id": 6, "category_id": 3, "bbox": [671.7904663085938, 123.68303680419922, 608.2095336914062, 590.1875686645508], "score": 0.0439547561109066}, {"image_id": 6, "category_id": 3, "bbox": [387.21063232421875, 477.0207214355469, 213.377685546875, 178.39498901367188], "score": 0.04372669756412506}, {"image_id": 6, "category_id": 3, "bbox": [420.403076171875, 423.6772766113281, 213.90008544921875, 181.20639038085938], "score": 0.042805273085832596}, {"image_id": 7, "category_id": 1, "bbox": [547.563232421875, 185.5882110595703, 310.4661865234375, 457.31468200683594], "score": 0.8301798701286316}, {"image_id": 7, "category_id": 1, "bbox": [775.5195922851562, 413.4855041503906, 214.49212646484375, 306.5144958496094], "score": 0.6637911200523376}, {"image_id": 7, "category_id": 1, "bbox": [654.7022705078125, 192.6926727294922, 295.0128173828125, 444.9237823486328], "score": 0.28122666478157043}, {"image_id": 7, "category_id": 1, "bbox": [440.9662780761719, 16.335468292236328, 516.7436828613281, 640.8083915710449], "score": 0.1363172084093094}, {"image_id": 7, "category_id": 1, "bbox": [789.7010498046875, 285.5158996582031, 245.77685546875, 422.3893737792969], "score": 0.12220050394535065}, {"image_id": 7, "category_id": 1, "bbox": [533.9930419921875, 369.8379821777344, 298.2177734375, 350.1620178222656], "score": 0.11817862093448639}, {"image_id": 7, "category_id": 1, "bbox": [257.1998291015625, 33.113136291503906, 226.09869384765625, 400.40111541748047], "score": 0.11364658176898956}, {"image_id": 7, "category_id": 1, "bbox": [551.9656372070312, 74.34455108642578, 288.32733154296875, 405.5497055053711], "score": 0.10085663944482803}, {"image_id": 7, "category_id": 1, "bbox": [297.77642822265625, 7.4172821044921875, 252.5179443359375, 393.1324920654297], "score": 0.08801956474781036}, {"image_id": 7, "category_id": 1, "bbox": [300.25927734375, 90.2318115234375, 580.048583984375, 621.8085327148438], "score": 0.07983501255512238}, {"image_id": 7, "category_id": 1, "bbox": [521.3123168945312, 150.00222778320312, 276.517822265625, 418.2303161621094], "score": 0.07788187265396118}, {"image_id": 7, "category_id": 1, "bbox": [0.0, 15.830543518066406, 662.94287109375, 635.9220809936523], "score": 0.07497076690196991}, {"image_id": 7, "category_id": 1, "bbox": [531.0865478515625, 93.25562286376953, 565.5113525390625, 622.1451950073242], "score": 0.0711488351225853}, {"image_id": 7, "category_id": 1, "bbox": [513.6532592773438, 34.1805419921875, 266.21331787109375, 403.7594909667969], "score": 0.06919439136981964}, 
{"image_id": 7, "category_id": 1, "bbox": [693.0292358398438, 363.6356506347656, 288.6046142578125, 356.3643493652344], "score": 0.06230888143181801}, {"image_id": 7, "category_id": 1, "bbox": [347.89068603515625, 29.200733184814453, 271.02978515625, 411.9144096374512], "score": 0.0594698041677475}, {"image_id": 7, "category_id": 1, "bbox": [1031.2833251953125, 161.24095153808594, 224.0631103515625, 349.07557678222656], "score": 0.05807480216026306}, {"image_id": 7, "category_id": 1, "bbox": [1021.8143920898438, 38.929710388183594, 232.92010498046875, 411.6409683227539], "score": 0.05299000069499016}, {"image_id": 7, "category_id": 1, "bbox": [150.26742553710938, 0.0, 287.1914367675781, 401.87298583984375], "score": 0.051814112812280655}, {"image_id": 7, "category_id": 1, "bbox": [423.54376220703125, 239.69908142089844, 564.1301879882812, 480.30091857910156], "score": 0.046369731426239014}, {"image_id": 7, "category_id": 1, "bbox": [891.343017578125, 172.88096618652344, 237.907958984375, 410.1459503173828], "score": 0.04607747495174408}, {"image_id": 7, "category_id": 1, "bbox": [856.9736938476562, 296.3630676269531, 273.12310791015625, 396.8785095214844], "score": 0.04384654387831688}, {"image_id": 7, "category_id": 1, "bbox": [939.12255859375, 207.72828674316406, 242.779052734375, 409.1182098388672], "score": 0.04100557044148445}, {"image_id": 7, "category_id": 1, "bbox": [0.0, 273.2300109863281, 285.5438537597656, 424.3643493652344], "score": 0.03954463079571724}, {"image_id": 7, "category_id": 2, "bbox": [511.38140869140625, 335.75970458984375, 274.6190185546875, 367.4239501953125], "score": 0.2818993628025055}, {"image_id": 7, "category_id": 2, "bbox": [515.6502685546875, 186.0267791748047, 284.479248046875, 451.83802795410156], "score": 0.18253354728221893}, {"image_id": 7, "category_id": 2, "bbox": [439.1817932128906, 374.11322021484375, 290.8903503417969, 345.88677978515625], "score": 0.1269722282886505}, {"image_id": 7, "category_id": 2, "bbox": [592.8676147460938, 316.0989685058594, 262.95538330078125, 400.4333190917969], "score": 0.11955208331346512}, {"image_id": 7, "category_id": 2, "bbox": [435.0103759765625, 248.5118408203125, 295.572021484375, 406.39459228515625], "score": 0.08723384886980057}, {"image_id": 7, "category_id": 2, "bbox": [775.741455078125, 442.7492980957031, 217.1368408203125, 277.2507019042969], "score": 0.0815587118268013}, {"image_id": 7, "category_id": 2, "bbox": [654.7022705078125, 192.6926727294922, 295.0128173828125, 444.9237823486328], "score": 0.07124905288219452}, {"image_id": 7, "category_id": 2, "bbox": [352.9539489746094, 324.1393127441406, 320.6612854003906, 385.4913024902344], "score": 0.07114387303590775}, {"image_id": 7, "category_id": 2, "bbox": [587.26220703125, 170.1125946044922, 267.228759765625, 438.91175842285156], "score": 0.0704040378332138}, {"image_id": 7, "category_id": 2, "bbox": [1026.3974609375, 87.85762786865234, 233.2650146484375, 377.02100372314453], "score": 0.06378202140331268}, {"image_id": 7, "category_id": 2, "bbox": [768.1732177734375, 284.89239501953125, 237.75933837890625, 421.60394287109375], "score": 0.05918990820646286}, {"image_id": 7, "category_id": 2, "bbox": [300.25927734375, 90.2318115234375, 580.048583984375, 621.8085327148438], "score": 0.056298814713954926}, {"image_id": 7, "category_id": 2, "bbox": [951.5792236328125, 83.93995666503906, 228.7747802734375, 383.42771911621094], "score": 0.05580630525946617}, {"image_id": 7, "category_id": 2, "bbox": [891.4163818359375, 150.0934295654297, 245.0291748046875, 
385.97584533691406], "score": 0.05399094521999359}, {"image_id": 7, "category_id": 2, "bbox": [270.2398376464844, 280.88275146484375, 335.5255432128906, 408.58636474609375], "score": 0.05364108830690384}, {"image_id": 7, "category_id": 2, "bbox": [207.18572998046875, 316.4754638671875, 323.01470947265625, 403.5245361328125], "score": 0.053316980600357056}, {"image_id": 7, "category_id": 2, "bbox": [577.1402587890625, 464.9170227050781, 347.902099609375, 255.08297729492188], "score": 0.052245162427425385}, {"image_id": 7, "category_id": 2, "bbox": [732.306640625, 512.7114868164062, 308.686767578125, 207.28851318359375], "score": 0.05150671675801277}, {"image_id": 7, "category_id": 2, "bbox": [852.19140625, 317.9924621582031, 289.01708984375, 402.0075378417969], "score": 0.05008357763290405}, {"image_id": 7, "category_id": 2, "bbox": [874.3478393554688, 38.82190704345703, 258.47283935546875, 396.7429428100586], "score": 0.04996572062373161}, {"image_id": 7, "category_id": 2, "bbox": [100.13284301757812, 167.46910095214844, 702.2733459472656, 552.5308990478516], "score": 0.048710744827985764}, {"image_id": 7, "category_id": 2, "bbox": [514.149169921875, 79.1510009765625, 275.7030029296875, 388.5951843261719], "score": 0.048283521085977554}, {"image_id": 7, "category_id": 2, "bbox": [349.8311462402344, 214.58827209472656, 311.0777282714844, 394.04942321777344], "score": 0.04753486067056656}, {"image_id": 7, "category_id": 2, "bbox": [939.12255859375, 207.72828674316406, 242.779052734375, 409.1182098388672], "score": 0.04688173159956932}, {"image_id": 7, "category_id": 2, "bbox": [786.8816528320312, 0.0, 300.14471435546875, 396.1904296875], "score": 0.0468762032687664}, {"image_id": 7, "category_id": 2, "bbox": [1032.3060302734375, 185.22076416015625, 222.6434326171875, 365.160888671875], "score": 0.046653326600790024}, {"image_id": 7, "category_id": 2, "bbox": [693.0292358398438, 363.6356506347656, 288.6046142578125, 356.3643493652344], "score": 0.046039704233407974}, {"image_id": 7, "category_id": 2, "bbox": [2.84906005859375, 84.69548797607422, 656.6051025390625, 610.1272048950195], "score": 0.04583362862467766}, {"image_id": 7, "category_id": 2, "bbox": [247.3118896484375, 311.6534729003906, 670.9571533203125, 408.3465270996094], "score": 0.04514647647738457}, {"image_id": 7, "category_id": 2, "bbox": [499.5958251953125, 303.3591613769531, 669.3096923828125, 416.6408386230469], "score": 0.04440008103847504}, {"image_id": 7, "category_id": 2, "bbox": [343.804443359375, 0.0, 281.29534912109375, 317.5802001953125], "score": 0.043061885982751846}, {"image_id": 7, "category_id": 2, "bbox": [260.66693115234375, 0.0, 217.52813720703125, 355.9234619140625], "score": 0.042913101613521576}, {"image_id": 7, "category_id": 2, "bbox": [531.0865478515625, 93.25562286376953, 565.5113525390625, 622.1451950073242], "score": 0.04193935543298721}, {"image_id": 7, "category_id": 2, "bbox": [813.2662963867188, 213.53260803222656, 241.81488037109375, 402.9410858154297], "score": 0.04177385941147804}, {"image_id": 7, "category_id": 2, "bbox": [501.913818359375, 515.1275634765625, 355.2816162109375, 204.8724365234375], "score": 0.04163682088255882}, {"image_id": 7, "category_id": 2, "bbox": [205.34510803222656, 210.884521484375, 324.1420135498047, 402.25689697265625], "score": 0.04128006100654602}, {"image_id": 7, "category_id": 2, "bbox": [141.13917541503906, 273.75439453125, 325.8880157470703, 427.65203857421875], "score": 0.041180819272994995}, {"image_id": 7, "category_id": 2, "bbox": [594.071533203125, 0.0, 
305.400146484375, 368.8407287597656], "score": 0.040655314922332764}, {"image_id": 7, "category_id": 2, "bbox": [1021.288330078125, 0.0, 241.9566650390625, 366.8496398925781], "score": 0.04055698588490486}, {"image_id": 7, "category_id": 2, "bbox": [297.77642822265625, 7.4172821044921875, 252.5179443359375, 393.1324920654297], "score": 0.039907969534397125}, {"image_id": 7, "category_id": 2, "bbox": [597.963134765625, 78.33509063720703, 252.161865234375, 399.85143280029297], "score": 0.03957502916455269}, {"image_id": 7, "category_id": 2, "bbox": [798.3861694335938, 105.3497543334961, 291.16693115234375, 402.96973419189453], "score": 0.039399147033691406}, {"image_id": 7, "category_id": 3, "bbox": [533.9930419921875, 369.8379821777344, 298.2177734375, 350.1620178222656], "score": 0.3593442440032959}, {"image_id": 7, "category_id": 3, "bbox": [543.0361328125, 210.1212158203125, 312.998291015625, 473.476806640625], "score": 0.24332883954048157}, {"image_id": 7, "category_id": 3, "bbox": [775.741455078125, 442.7492980957031, 217.1368408203125, 277.2507019042969], "score": 0.1873246133327484}, {"image_id": 7, "category_id": 3, "bbox": [441.6794738769531, 159.88697814941406, 524.5331726074219, 560.1130218505859], "score": 0.12961114943027496}, {"image_id": 7, "category_id": 3, "bbox": [439.1817932128906, 374.11322021484375, 290.8903503417969, 345.88677978515625], "score": 0.12491157650947571}, {"image_id": 7, "category_id": 3, "bbox": [778.053466796875, 331.2846984863281, 260.130615234375, 387.2222595214844], "score": 0.08990829437971115}, {"image_id": 7, "category_id": 3, "bbox": [1026.3974609375, 87.85762786865234, 233.2650146484375, 377.02100372314453], "score": 0.08185520768165588}, {"image_id": 7, "category_id": 3, "bbox": [531.0865478515625, 93.25562286376953, 565.5113525390625, 622.1451950073242], "score": 0.06722726672887802}, {"image_id": 7, "category_id": 3, "bbox": [300.25927734375, 90.2318115234375, 580.048583984375, 621.8085327148438], "score": 0.0627114549279213}, {"image_id": 7, "category_id": 3, "bbox": [516.80810546875, 401.8154602050781, 213.3731689453125, 187.48464965820312], "score": 0.060217346996068954}, {"image_id": 7, "category_id": 3, "bbox": [499.5958251953125, 303.3591613769531, 669.3096923828125, 416.6408386230469], "score": 0.056487273424863815}, {"image_id": 7, "category_id": 3, "bbox": [67.11212158203125, 17.91712188720703, 743.5320434570312, 634.0502243041992], "score": 0.055248063057661057}, {"image_id": 7, "category_id": 3, "bbox": [453.24688720703125, 421.1666259765625, 212.65911865234375, 183.04144287109375], "score": 0.05219947174191475}, {"image_id": 7, "category_id": 3, "bbox": [445.7545471191406, 0.0, 527.8491516113281, 591.3651123046875], "score": 0.050801832228899}, {"image_id": 7, "category_id": 3, "bbox": [546.0054931640625, 439.8708190917969, 212.37078857421875, 181.63290405273438], "score": 0.04979199171066284}, {"image_id": 7, "category_id": 3, "bbox": [951.5792236328125, 83.93995666503906, 228.7747802734375, 383.42771911621094], "score": 0.04963277652859688}, {"image_id": 7, "category_id": 3, "bbox": [643.0676879882812, 405.1342468261719, 213.31378173828125, 179.80307006835938], "score": 0.0488504022359848}, {"image_id": 7, "category_id": 3, "bbox": [1032.3060302734375, 185.22076416015625, 222.6434326171875, 365.160888671875], "score": 0.048625532537698746}, {"image_id": 7, "category_id": 3, "bbox": [708.4664306640625, 422.16522216796875, 212.27362060546875, 177.4368896484375], "score": 0.04849542677402496}, {"image_id": 7, "category_id": 3, 
"bbox": [746.3216552734375, 471.9095153808594, 213.44061279296875, 185.92916870117188], "score": 0.046146880835294724}, {"image_id": 7, "category_id": 3, "bbox": [575.4099731445312, 406.0338134765625, 216.17633056640625, 179.42291259765625], "score": 0.045227549970149994}, {"image_id": 7, "category_id": 3, "bbox": [693.0292358398438, 363.6356506347656, 288.6046142578125, 356.3643493652344], "score": 0.04480327293276787}, {"image_id": 7, "category_id": 3, "bbox": [643.9442749023438, 457.9296569824219, 211.61395263671875, 181.00070190429688], "score": 0.04391402378678322}, {"image_id": 7, "category_id": 3, "bbox": [546.9727172851562, 368.44921875, 214.64825439453125, 181.47442626953125], "score": 0.04369533434510231}, {"image_id": 7, "category_id": 3, "bbox": [588.2669067382812, 425.6419982910156, 314.4925537109375, 294.3580017089844], "score": 0.04358764737844467}, {"image_id": 7, "category_id": 3, "bbox": [518.6806640625, 474.7953186035156, 212.69134521484375, 183.91952514648438], "score": 0.04303980618715286}, {"image_id": 7, "category_id": 3, "bbox": [737.4876708984375, 544.6648559570312, 215.22467041015625, 175.33514404296875], "score": 0.04295118898153305}, {"image_id": 7, "category_id": 3, "bbox": [92.01971435546875, 240.406982421875, 712.1656494140625, 479.593017578125], "score": 0.04226689785718918}, {"image_id": 7, "category_id": 3, "bbox": [809.4808349609375, 471.97003173828125, 208.8839111328125, 183.33917236328125], "score": 0.04181203246116638}, {"image_id": 7, "category_id": 3, "bbox": [804.3861083984375, 401.62933349609375, 208.78656005859375, 183.036376953125], "score": 0.041683148592710495}, {"image_id": 7, "category_id": 3, "bbox": [641.8637084960938, 333.9493408203125, 215.07061767578125, 177.88726806640625], "score": 0.0410153791308403}, {"image_id": 7, "category_id": 3, "bbox": [435.0103759765625, 248.5118408203125, 295.572021484375, 406.39459228515625], "score": 0.04075957089662552}, {"image_id": 7, "category_id": 3, "bbox": [670.5516967773438, 547.6573486328125, 219.69500732421875, 172.3426513671875], "score": 0.04037197679281235}, {"image_id": 7, "category_id": 3, "bbox": [419.318359375, 456.60357666015625, 215.7039794921875, 186.1038818359375], "score": 0.039981599897146225}, {"image_id": 8, "category_id": 1, "bbox": [656.4703369140625, 151.6507110595703, 188.97021484375, 387.4842987060547], "score": 0.7373226881027222}, {"image_id": 8, "category_id": 1, "bbox": [674.205322265625, 169.66445922851562, 247.2772216796875, 394.0945129394531], "score": 0.3976251780986786}, {"image_id": 8, "category_id": 1, "bbox": [616.3716430664062, 185.9932861328125, 233.117919921875, 451.98944091796875], "score": 0.3615182042121887}, {"image_id": 8, "category_id": 1, "bbox": [579.734375, 316.3863220214844, 261.01727294921875, 376.9328308105469], "score": 0.2202049195766449}, {"image_id": 8, "category_id": 1, "bbox": [590.1630249023438, 75.43645477294922, 255.36016845703125, 419.43399810791016], "score": 0.16113384068012238}, {"image_id": 8, "category_id": 1, "bbox": [851.3003540039062, 166.1807403564453, 300.50640869140625, 431.10081481933594], "score": 0.12207100540399551}, {"image_id": 8, "category_id": 1, "bbox": [508.4986572265625, 177.3867645263672, 281.38824462890625, 398.2414093017578], "score": 0.07083781808614731}, {"image_id": 8, "category_id": 1, "bbox": [0.0, 10.867607116699219, 508.03936767578125, 634.2504959106445], "score": 0.059866610914468765}, {"image_id": 8, "category_id": 1, "bbox": [278.0474853515625, 8.179183959960938, 597.545654296875, 632.1501617431641], 
"score": 0.059226538985967636}, {"image_id": 8, "category_id": 1, "bbox": [641.9735107421875, 0.0, 263.404541015625, 361.0635070800781], "score": 0.05643809586763382}, {"image_id": 8, "category_id": 1, "bbox": [495.0250244140625, 291.98822021484375, 309.22808837890625, 370.95599365234375], "score": 0.053579024970531464}, {"image_id": 8, "category_id": 1, "bbox": [651.713623046875, 234.4825439453125, 290.52203369140625, 428.10589599609375], "score": 0.051180653274059296}, {"image_id": 8, "category_id": 1, "bbox": [448.89263916015625, 110.19831085205078, 513.5753784179688, 592.293327331543], "score": 0.051036179065704346}, {"image_id": 8, "category_id": 1, "bbox": [251.65240478515625, 232.3758087158203, 621.1736450195312, 487.6241912841797], "score": 0.04558030143380165}, {"image_id": 8, "category_id": 1, "bbox": [169.64971923828125, 3.979076385498047, 261.7822265625, 375.9713325500488], "score": 0.04295913875102997}, {"image_id": 8, "category_id": 1, "bbox": [555.3302001953125, 0.0, 565.1533203125, 555.9094848632812], "score": 0.040976982563734055}, {"image_id": 8, "category_id": 1, "bbox": [791.489013671875, 178.57977294921875, 488.510986328125, 541.4202270507812], "score": 0.040965162217617035}, {"image_id": 8, "category_id": 1, "bbox": [804.7510375976562, 228.2188262939453, 305.15374755859375, 421.7339324951172], "score": 0.0409485325217247}, {"image_id": 8, "category_id": 1, "bbox": [91.17095947265625, 99.33226776123047, 710.680419921875, 598.7482986450195], "score": 0.04048750922083855}, {"image_id": 8, "category_id": 1, "bbox": [918.3189697265625, 224.4425506591797, 254.8609619140625, 428.96778869628906], "score": 0.04016031324863434}, {"image_id": 8, "category_id": 1, "bbox": [0.0, 243.38768005371094, 272.8200988769531, 422.6547393798828], "score": 0.039384715259075165}, {"image_id": 8, "category_id": 1, "bbox": [0.0, 176.89573669433594, 647.2017822265625, 543.1042633056641], "score": 0.03903040289878845}, {"image_id": 8, "category_id": 1, "bbox": [724.3057250976562, 144.4635009765625, 275.44476318359375, 402.01629638671875], "score": 0.03783225268125534}, {"image_id": 8, "category_id": 2, "bbox": [579.5671997070312, 290.4421081542969, 265.33319091796875, 374.3681335449219], "score": 0.4094294607639313}, {"image_id": 8, "category_id": 2, "bbox": [495.3659362792969, 259.75018310546875, 309.6445617675781, 380.38165283203125], "score": 0.2800469696521759}, {"image_id": 8, "category_id": 2, "bbox": [648.5614013671875, 278.83465576171875, 297.0821533203125, 401.4835205078125], "score": 0.1512443721294403}, {"image_id": 8, "category_id": 2, "bbox": [499.830322265625, 372.1707763671875, 274.0179443359375, 347.8292236328125], "score": 0.10415538400411606}, {"image_id": 8, "category_id": 2, "bbox": [571.3369140625, 412.9598693847656, 258.9085693359375, 307.0401306152344], "score": 0.10351886600255966}, {"image_id": 8, "category_id": 2, "bbox": [591.373046875, 165.4379119873047, 253.2318115234375, 415.34217834472656], "score": 0.08845286071300507}, {"image_id": 8, "category_id": 2, "bbox": [655.4840087890625, 126.3590087890625, 190.0645751953125, 395.942138671875], "score": 0.08101605623960495}, {"image_id": 8, "category_id": 2, "bbox": [417.47760009765625, 279.2547912597656, 297.5399169921875, 400.6411437988281], "score": 0.07419679313898087}, {"image_id": 8, "category_id": 2, "bbox": [655.9952392578125, 434.66192626953125, 306.87628173828125, 285.33807373046875], "score": 0.06644438952207565}, {"image_id": 8, "category_id": 2, "bbox": [199.3364715576172, 2.3371009826660156, 
307.26087951660156, 381.2831687927246], "score": 0.061608705669641495}, {"image_id": 8, "category_id": 2, "bbox": [803.1551513671875, 199.65074157714844, 312.53076171875, 422.7836456298828], "score": 0.06103771924972534}, {"image_id": 8, "category_id": 2, "bbox": [280.3919677734375, 0.0, 258.733642578125, 315.599609375], "score": 0.05982782691717148}, {"image_id": 8, "category_id": 2, "bbox": [166.06655883789062, 0.0, 280.2371520996094, 310.00115966796875], "score": 0.05789730325341225}, {"image_id": 8, "category_id": 2, "bbox": [751.697265625, 275.0790100097656, 258.08624267578125, 400.9725036621094], "score": 0.054726194590330124}, {"image_id": 8, "category_id": 2, "bbox": [288.7310485839844, 168.74656677246094, 574.7602844238281, 551.2534332275391], "score": 0.05455983802676201}, {"image_id": 8, "category_id": 2, "bbox": [591.5557861328125, 482.5639343261719, 323.145263671875, 237.43606567382812], "score": 0.05268795043230057}, {"image_id": 8, "category_id": 2, "bbox": [721.5717163085938, 391.9979248046875, 311.25323486328125, 328.0020751953125], "score": 0.05231486260890961}, {"image_id": 8, "category_id": 2, "bbox": [913.120361328125, 167.7422332763672, 262.869873046875, 428.31373596191406], "score": 0.051987696439027786}, {"image_id": 8, "category_id": 2, "bbox": [395.39312744140625, 438.9485778808594, 330.505615234375, 281.0514221191406], "score": 0.05136581137776375}, {"image_id": 8, "category_id": 2, "bbox": [806.687744140625, 301.7964782714844, 280.277099609375, 409.2057189941406], "score": 0.04770199581980705}, {"image_id": 8, "category_id": 2, "bbox": [459.18017578125, 478.2519836425781, 341.97174072265625, 241.74801635742188], "score": 0.04767514020204544}, {"image_id": 8, "category_id": 2, "bbox": [719.009033203125, 0.0, 284.60723876953125, 307.6213684082031], "score": 0.04576853662729263}, {"image_id": 8, "category_id": 2, "bbox": [1008.16748046875, 3.7300643920898438, 254.2352294921875, 391.97562408447266], "score": 0.04572173207998276}, {"image_id": 8, "category_id": 2, "bbox": [642.6259155273438, 0.0, 261.89788818359375, 317.3670349121094], "score": 0.04499411582946777}, {"image_id": 8, "category_id": 2, "bbox": [91.17095947265625, 99.33226776123047, 710.680419921875, 598.7482986450195], "score": 0.044230587780475616}, {"image_id": 8, "category_id": 2, "bbox": [863.650146484375, 252.2896270751953, 281.951416015625, 434.36839294433594], "score": 0.044156547635793686}, {"image_id": 8, "category_id": 2, "bbox": [780.310546875, 438.7919616699219, 318.77685546875, 281.2080383300781], "score": 0.04385851323604584}, {"image_id": 8, "category_id": 2, "bbox": [667.791015625, 78.6534194946289, 255.07513427734375, 421.29015350341797], "score": 0.0435849167406559}, {"image_id": 8, "category_id": 2, "bbox": [789.707763671875, 8.628713607788086, 283.83349609375, 373.99415016174316], "score": 0.04284917563199997}, {"image_id": 8, "category_id": 2, "bbox": [132.21356201171875, 39.7972526550293, 254.131591796875, 377.0663642883301], "score": 0.042601533234119415}, {"image_id": 8, "category_id": 2, "bbox": [275.02587890625, 61.338043212890625, 281.8514404296875, 402.3250732421875], "score": 0.04245452955365181}, {"image_id": 8, "category_id": 2, "bbox": [506.37896728515625, 141.99282836914062, 289.91583251953125, 401.5886535644531], "score": 0.04201895743608475}, {"image_id": 8, "category_id": 2, "bbox": [529.2277221679688, 289.434814453125, 618.7601928710938, 430.565185546875], "score": 0.04120573773980141}, {"image_id": 8, "category_id": 2, "bbox": [338.94207763671875, 402.8692932128906, 
306.243896484375, 317.1307067871094], "score": 0.04099868983030319}, {"image_id": 8, "category_id": 2, "bbox": [925.5709228515625, 9.184038162231445, 287.0272216796875, 380.7454357147217], "score": 0.040613822638988495}, {"image_id": 8, "category_id": 2, "bbox": [860.080322265625, 123.60834503173828, 274.7767333984375, 387.8669967651367], "score": 0.04046463221311569}, {"image_id": 8, "category_id": 2, "bbox": [909.9329833984375, 438.22998046875, 327.5072021484375, 281.77001953125], "score": 0.038468509912490845}, {"image_id": 8, "category_id": 2, "bbox": [612.301513671875, 364.1591491699219, 207.32861328125, 188.25851440429688], "score": 0.037941716611385345}, {"image_id": 8, "category_id": 2, "bbox": [335.241455078125, 0.0, 288.4566650390625, 311.2264099121094], "score": 0.03774504363536835}, {"image_id": 8, "category_id": 2, "bbox": [697.6127319335938, 519.5723876953125, 361.10040283203125, 200.4276123046875], "score": 0.03765880689024925}, {"image_id": 8, "category_id": 2, "bbox": [584.8720092773438, 434.1392822265625, 216.00762939453125, 193.456787109375], "score": 0.037594038993120193}, {"image_id": 8, "category_id": 2, "bbox": [1052.6824951171875, 84.43579864501953, 227.3175048828125, 386.27257537841797], "score": 0.03748723492026329}, {"image_id": 8, "category_id": 2, "bbox": [929.0133056640625, 87.2901840209961, 250.0235595703125, 382.5304641723633], "score": 0.03690017759799957}, {"image_id": 8, "category_id": 3, "bbox": [586.1864013671875, 289.2456359863281, 266.5364990234375, 377.6338806152344], "score": 0.7346656918525696}, {"image_id": 8, "category_id": 3, "bbox": [495.3659362792969, 259.75018310546875, 309.6445617675781, 380.38165283203125], "score": 0.3408264219760895}, {"image_id": 8, "category_id": 3, "bbox": [651.43017578125, 168.6580810546875, 194.38751220703125, 398.4794921875], "score": 0.26237034797668457}, {"image_id": 8, "category_id": 3, "bbox": [851.3003540039062, 166.1807403564453, 300.50640869140625, 431.10081481933594], "score": 0.17500357329845428}, {"image_id": 8, "category_id": 3, "bbox": [648.5614013671875, 278.83465576171875, 297.0821533203125, 401.4835205078125], "score": 0.14145712554454803}, {"image_id": 8, "category_id": 3, "bbox": [169.41522216796875, 30.85953140258789, 253.35498046875, 385.229793548584], "score": 0.13608595728874207}, {"image_id": 8, "category_id": 3, "bbox": [804.7510375976562, 228.2188262939453, 305.15374755859375, 421.7339324951172], "score": 0.10250163078308105}, {"image_id": 8, "category_id": 3, "bbox": [667.1956787109375, 152.1622772216797, 257.287841796875, 382.8355255126953], "score": 0.10181612521409988}, {"image_id": 8, "category_id": 3, "bbox": [199.3364715576172, 2.3371009826660156, 307.26087951660156, 381.2831687927246], "score": 0.09601906687021255}, {"image_id": 8, "category_id": 3, "bbox": [918.3189697265625, 224.4425506591797, 254.8609619140625, 428.96778869628906], "score": 0.08161073178052902}, {"image_id": 8, "category_id": 3, "bbox": [613.3096923828125, 346.62542724609375, 206.5067138671875, 186.43487548828125], "score": 0.06737247854471207}, {"image_id": 8, "category_id": 3, "bbox": [590.1630249023438, 75.43645477294922, 255.36016845703125, 419.43399810791016], "score": 0.058708902448415756}, {"image_id": 8, "category_id": 3, "bbox": [280.3919677734375, 0.0, 258.733642578125, 315.599609375], "score": 0.0555167980492115}, {"image_id": 8, "category_id": 3, "bbox": [584.8720092773438, 434.1392822265625, 216.00762939453125, 193.456787109375], "score": 0.05500423535704613}, {"image_id": 8, "category_id": 3, "bbox": 
[642.5379638671875, 313.1095886230469, 208.4849853515625, 181.79928588867188], "score": 0.05317741632461548}, {"image_id": 8, "category_id": 3, "bbox": [549.228515625, 347.9256896972656, 211.1572265625, 185.55221557617188], "score": 0.05279277265071869}, {"image_id": 8, "category_id": 3, "bbox": [865.84423828125, 297.52911376953125, 277.1337890625, 418.98504638671875], "score": 0.052505526691675186}, {"image_id": 8, "category_id": 3, "bbox": [611.3715209960938, 399.35028076171875, 214.91815185546875, 191.5596923828125], "score": 0.05218765512108803}, {"image_id": 8, "category_id": 3, "bbox": [448.89263916015625, 110.19831085205078, 513.5753784179688, 592.293327331543], "score": 0.05208839848637581}, {"image_id": 8, "category_id": 3, "bbox": [81.35104370117188, 178.389404296875, 726.8439636230469, 541.610595703125], "score": 0.04960538446903229}, {"image_id": 8, "category_id": 3, "bbox": [677.2559814453125, 350.9105529785156, 207.3426513671875, 178.43917846679688], "score": 0.0486726388335228}, {"image_id": 8, "category_id": 3, "bbox": [517.8961181640625, 437.3795166015625, 216.92535400390625, 189.05596923828125], "score": 0.04694030433893204}, {"image_id": 8, "category_id": 3, "bbox": [551.1087646484375, 401.0065612792969, 214.369140625, 189.12045288085938], "score": 0.04671424627304077}, {"image_id": 8, "category_id": 3, "bbox": [580.121337890625, 313.54669189453125, 210.80401611328125, 182.5177001953125], "score": 0.04577704146504402}, {"image_id": 8, "category_id": 3, "bbox": [0.0, 16.934017181396484, 652.39208984375, 632.868106842041], "score": 0.04460371285676956}, {"image_id": 8, "category_id": 3, "bbox": [675.4215087890625, 405.07794189453125, 214.16351318359375, 179.79296875], "score": 0.04425964504480362}, {"image_id": 8, "category_id": 3, "bbox": [577.6177978515625, 488.6298522949219, 222.267578125, 194.69320678710938], "score": 0.044089555740356445}, {"image_id": 8, "category_id": 3, "bbox": [513.054443359375, 490.1424560546875, 219.3885498046875, 191.27392578125], "score": 0.04395771026611328}, {"image_id": 8, "category_id": 3, "bbox": [417.47760009765625, 279.2547912597656, 297.5399169921875, 400.6411437988281], "score": 0.043525081127882004}, {"image_id": 8, "category_id": 3, "bbox": [643.9474487304688, 262.98724365234375, 213.82403564453125, 176.8748779296875], "score": 0.04232720285654068}, {"image_id": 8, "category_id": 3, "bbox": [646.183837890625, 439.6156921386719, 215.8594970703125, 184.91867065429688], "score": 0.04150444269180298}, {"image_id": 8, "category_id": 3, "bbox": [757.8163452148438, 242.66062927246094, 256.44903564453125, 400.9988555908203], "score": 0.03954354301095009}, {"image_id": 8, "category_id": 3, "bbox": [548.9442138671875, 281.1544189453125, 212.3663330078125, 174.7218017578125], "score": 0.03920748084783554}, {"image_id": 8, "category_id": 3, "bbox": [706.974609375, 317.1756896972656, 211.6942138671875, 172.7769775390625], "score": 0.03846166282892227}, {"image_id": 9, "category_id": 1, "bbox": [870.1323852539062, 205.73561096191406, 397.92181396484375, 639.1188201904297], "score": 0.3932396173477173}, {"image_id": 9, "category_id": 1, "bbox": [865.179931640625, 421.00543212890625, 415.3348388671875, 603.0204467773438], "score": 0.21300268173217773}, {"image_id": 9, "category_id": 1, "bbox": [939.2089233398438, 75.30713653564453, 315.71612548828125, 554.6393966674805], "score": 0.20521889626979828}, {"image_id": 9, "category_id": 1, "bbox": [985.7855224609375, 202.83824157714844, 404.4580078125, 635.3813629150391], "score": 0.12601342797279358}, 
{"image_id": 9, "category_id": 1, "bbox": [675.0484008789062, 22.713478088378906, 759.0917358398438, 946.2409286499023], "score": 0.0961579829454422}, {"image_id": 9, "category_id": 1, "bbox": [750.3651123046875, 272.53662109375, 434.7081298828125, 586.208251953125], "score": 0.07594629377126694}, {"image_id": 9, "category_id": 1, "bbox": [804.1790771484375, 143.7874298095703, 796.527587890625, 915.4073944091797], "score": 0.07573810964822769}, {"image_id": 9, "category_id": 1, "bbox": [1679.126220703125, 0.0, 240.873779296875, 224.1324005126953], "score": 0.07187424600124359}, {"image_id": 9, "category_id": 1, "bbox": [190.38937377929688, 49.702430725097656, 416.4510803222656, 575.6456527709961], "score": 0.06757698208093643}, {"image_id": 9, "category_id": 1, "bbox": [471.99884033203125, 135.84153747558594, 827.9761352539062, 936.8020172119141], "score": 0.06070150434970856}, {"image_id": 9, "category_id": 1, "bbox": [659.0844116210938, 366.8367919921875, 785.1596069335938, 713.1632080078125], "score": 0.0474865697324276}, {"image_id": 9, "category_id": 1, "bbox": [919.4672241210938, 25.142452239990234, 930.3074340820312, 917.3062171936035], "score": 0.04301414266228676}, {"image_id": 9, "category_id": 1, "bbox": [956.7683715820312, 581.9747924804688, 323.22930908203125, 498.02520751953125], "score": 0.039764512330293655}, {"image_id": 9, "category_id": 1, "bbox": [165.54322814941406, 34.65940856933594, 1046.0782318115234, 933.8484039306641], "score": 0.03903642296791077}, {"image_id": 9, "category_id": 1, "bbox": [977.3602294921875, 377.4853820800781, 412.0224609375, 609.9966735839844], "score": 0.03861355036497116}, {"image_id": 9, "category_id": 1, "bbox": [745.3152465820312, 424.6156005859375, 440.60516357421875, 583.1426391601562], "score": 0.03844050318002701}, {"image_id": 9, "category_id": 1, "bbox": [140.74534606933594, 354.67364501953125, 1057.880142211914, 725.3263549804688], "score": 0.03656233474612236}, {"image_id": 9, "category_id": 1, "bbox": [1092.5955810546875, 169.7401580810547, 415.0689697265625, 600.8293609619141], "score": 0.03475864604115486}, {"image_id": 9, "category_id": 1, "bbox": [914.93701171875, 255.94549560546875, 310.1466064453125, 267.03265380859375], "score": 0.03396284207701683}, {"image_id": 9, "category_id": 2, "bbox": [865.7252197265625, 384.4010314941406, 409.5, 566.3952331542969], "score": 0.24035900831222534}, {"image_id": 9, "category_id": 2, "bbox": [747.2316284179688, 385.4273986816406, 433.16815185546875, 550.6034240722656], "score": 0.16070479154586792}, {"image_id": 9, "category_id": 2, "bbox": [977.3602294921875, 377.4853820800781, 412.0224609375, 609.9966735839844], "score": 0.1378522366285324}, {"image_id": 9, "category_id": 2, "bbox": [945.4480590820312, 519.5164794921875, 331.99835205078125, 560.4835205078125], "score": 0.12590518593788147}, {"image_id": 9, "category_id": 2, "bbox": [906.7534790039062, 202.00674438476562, 364.32794189453125, 637.7270202636719], "score": 0.0956282913684845}, {"image_id": 9, "category_id": 2, "bbox": [1626.775390625, 0.0, 293.224609375, 218.8296356201172], "score": 0.08103203773498535}, {"image_id": 9, "category_id": 2, "bbox": [744.900634765625, 544.1817626953125, 432.0728759765625, 535.8182373046875], "score": 0.07784531265497208}, {"image_id": 9, "category_id": 2, "bbox": [985.7855224609375, 202.83824157714844, 404.4580078125, 635.3813629150391], "score": 0.07097428292036057}, {"image_id": 9, "category_id": 2, "bbox": [1002.6229248046875, 593.4696655273438, 368.6572265625, 486.53033447265625], "score": 
0.06314068287611008}, {"image_id": 9, "category_id": 2, "bbox": [850.5430908203125, 648.8118896484375, 410.2940673828125, 431.1881103515625], "score": 0.05126311630010605}, {"image_id": 9, "category_id": 2, "bbox": [471.99884033203125, 135.84153747558594, 827.9761352539062, 936.8020172119141], "score": 0.051183052361011505}, {"image_id": 9, "category_id": 2, "bbox": [1332.9901123046875, 185.9974365234375, 324.248046875, 571.389404296875], "score": 0.04833979904651642}, {"image_id": 9, "category_id": 2, "bbox": [762.5473022460938, 165.41294860839844, 417.29974365234375, 617.5507965087891], "score": 0.0478382408618927}, {"image_id": 9, "category_id": 2, "bbox": [939.2089233398438, 75.30713653564453, 315.71612548828125, 554.6393966674805], "score": 0.04706781357526779}, {"image_id": 9, "category_id": 2, "bbox": [1080.0140380859375, 541.3462524414062, 429.2120361328125, 538.6537475585938], "score": 0.0455753430724144}, {"image_id": 9, "category_id": 2, "bbox": [106.6820068359375, 354.32122802734375, 480.6224365234375, 544.337890625], "score": 0.04467866197228432}, {"image_id": 9, "category_id": 2, "bbox": [1074.8955078125, 314.79034423828125, 467.965087890625, 616.9091796875], "score": 0.044616829603910446}, {"image_id": 9, "category_id": 2, "bbox": [834.8815307617188, 0.0, 392.34124755859375, 503.2972717285156], "score": 0.04457372799515724}, {"image_id": 9, "category_id": 2, "bbox": [1094.4979248046875, 122.61607360839844, 403.9688720703125, 587.1976470947266], "score": 0.04340171441435814}, {"image_id": 9, "category_id": 2, "bbox": [184.80972290039062, 0.0, 411.7839660644531, 556.8599853515625], "score": 0.04320831596851349}, {"image_id": 9, "category_id": 2, "bbox": [173.30746459960938, 126.53386688232422, 383.3853454589844, 629.422737121582], "score": 0.04263585805892944}, {"image_id": 9, "category_id": 2, "bbox": [1719.8973388671875, 0.0, 200.1026611328125, 185.571533203125], "score": 0.042409174144268036}, {"image_id": 9, "category_id": 2, "bbox": [217.10000610351562, 284.8376159667969, 469.0531311035156, 560.3382873535156], "score": 0.04210961237549782}, {"image_id": 9, "category_id": 2, "bbox": [653.9044189453125, 329.8564453125, 384.58740234375, 575.2835693359375], "score": 0.04129871353507042}, {"image_id": 9, "category_id": 2, "bbox": [342.9413757324219, 443.8465270996094, 1023.9358215332031, 636.1534729003906], "score": 0.04114535450935364}, {"image_id": 9, "category_id": 2, "bbox": [1418.309814453125, 192.62039184570312, 377.205322265625, 562.7768859863281], "score": 0.04114309698343277}, {"image_id": 9, "category_id": 2, "bbox": [776.6068725585938, 772.4253540039062, 519.9423217773438, 307.57464599609375], "score": 0.04111527279019356}, {"image_id": 9, "category_id": 2, "bbox": [1209.892822265625, 215.8622589111328, 411.1962890625, 596.9328460693359], "score": 0.04101536050438881}, {"image_id": 9, "category_id": 2, "bbox": [1335.235107421875, 317.9488830566406, 315.4969482421875, 603.5966491699219], "score": 0.04035579785704613}, {"image_id": 9, "category_id": 2, "bbox": [1383.9930419921875, 0.0, 486.2210693359375, 492.36199951171875], "score": 0.039959341287612915}, {"image_id": 9, "category_id": 2, "bbox": [1004.2009887695312, 0.0, 396.23980712890625, 494.197509765625], "score": 0.039346251636743546}, {"image_id": 9, "category_id": 2, "bbox": [1212.355712890625, 19.757211685180664, 388.650146484375, 554.3615627288818], "score": 0.03930914029479027}, {"image_id": 9, "category_id": 2, "bbox": [0.0, 109.09233093261719, 787.7523193359375, 919.4293975830078], "score": 
0.03923633694648743}, {"image_id": 9, "category_id": 2, "bbox": [317.17523193359375, 0.0, 436.09637451171875, 585.4498291015625], "score": 0.039110105484724045}, {"image_id": 9, "category_id": 2, "bbox": [614.6310424804688, 650.9364624023438, 449.90301513671875, 429.06353759765625], "score": 0.039004791527986526}, {"image_id": 9, "category_id": 2, "bbox": [200.89834594726562, 425.3152770996094, 507.1254577636719, 610.4665832519531], "score": 0.038117729127407074}, {"image_id": 9, "category_id": 2, "bbox": [1223.994873046875, 364.3293762207031, 401.9166259765625, 616.7199401855469], "score": 0.03792126104235649}, {"image_id": 9, "category_id": 2, "bbox": [1327.0584716796875, 27.232797622680664, 350.0777587890625, 537.8240871429443], "score": 0.037895869463682175}, {"image_id": 9, "category_id": 2, "bbox": [1099.447265625, 0.0, 411.7257080078125, 532.3322143554688], "score": 0.03762432560324669}, {"image_id": 9, "category_id": 2, "bbox": [97.82061767578125, 476.2880554199219, 502.09332275390625, 603.7119445800781], "score": 0.037268564105033875}, {"image_id": 9, "category_id": 2, "bbox": [317.0312805175781, 340.7867736816406, 469.6247863769531, 566.1789855957031], "score": 0.037102110683918}, {"image_id": 9, "category_id": 2, "bbox": [187.35169982910156, 648.6124877929688, 514.3577117919922, 431.38751220703125], "score": 0.03676501661539078}, {"image_id": 9, "category_id": 2, "bbox": [1755.69482421875, 0.0, 164.30517578125, 282.61083984375], "score": 0.036188576370477676}, {"image_id": 9, "category_id": 2, "bbox": [289.14202880859375, 0.0, 483.388916015625, 331.7676086425781], "score": 0.035998161882162094}, {"image_id": 9, "category_id": 2, "bbox": [26.215049743652344, 411.5966796875, 423.82093048095703, 544.981201171875], "score": 0.03523970767855644}, {"image_id": 9, "category_id": 2, "bbox": [379.7488708496094, 0.0, 490.4463195800781, 390.5377502441406], "score": 0.03509150817990303}, {"image_id": 9, "category_id": 2, "bbox": [414.5704650878906, 649.2483520507812, 477.7510681152344, 430.75164794921875], "score": 0.03506218641996384}, {"image_id": 9, "category_id": 2, "bbox": [417.870849609375, 276.11297607421875, 446.5380859375, 583.7518310546875], "score": 0.03478969633579254}, {"image_id": 9, "category_id": 2, "bbox": [113.44143676757812, 0.0, 520.4213562011719, 351.376953125], "score": 0.03478135168552399}, {"image_id": 9, "category_id": 2, "bbox": [147.76634216308594, 258.1976013183594, 1062.649185180664, 821.8023986816406], "score": 0.0346624031662941}, {"image_id": 9, "category_id": 2, "bbox": [882.3492431640625, 0.0, 416.242919921875, 353.8573303222656], "score": 0.03414969891309738}, {"image_id": 9, "category_id": 2, "bbox": [1489.502197265625, 141.029541015625, 420.68603515625, 543.2526245117188], "score": 0.03412275388836861}, {"image_id": 9, "category_id": 2, "bbox": [0.0, 0.0, 796.35791015625, 575.1973876953125], "score": 0.033903732895851135}, {"image_id": 9, "category_id": 3, "bbox": [894.0670776367188, 421.9100036621094, 385.97698974609375, 604.2269592285156], "score": 0.6734268069267273}, {"image_id": 9, "category_id": 3, "bbox": [887.8251342773438, 264.0470275878906, 386.71661376953125, 607.2051696777344], "score": 0.630939781665802}, {"image_id": 9, "category_id": 3, "bbox": [956.7683715820312, 581.9747924804688, 323.22930908203125, 498.02520751953125], "score": 0.2463173121213913}, {"image_id": 9, "category_id": 3, "bbox": [750.9933471679688, 333.1730041503906, 428.35833740234375, 559.1659240722656], "score": 0.23006750643253326}, {"image_id": 9, "category_id": 3, 
"bbox": [976.6408081054688, 429.6921081542969, 414.43011474609375, 590.9501037597656], "score": 0.20444880425930023}, {"image_id": 9, "category_id": 3, "bbox": [979.5088500976562, 267.5263366699219, 408.81072998046875, 612.9562683105469], "score": 0.14948678016662598}, {"image_id": 9, "category_id": 3, "bbox": [748.9661254882812, 467.70782470703125, 429.74273681640625, 597.0027465820312], "score": 0.10961324721574783}, {"image_id": 9, "category_id": 3, "bbox": [688.4004516601562, 155.15127563476562, 738.1705932617188, 905.7170104980469], "score": 0.10912009328603745}, {"image_id": 9, "category_id": 3, "bbox": [1626.775390625, 0.0, 293.224609375, 218.8296356201172], "score": 0.1047540083527565}, {"image_id": 9, "category_id": 3, "bbox": [939.2089233398438, 75.30713653564453, 315.71612548828125, 554.6393966674805], "score": 0.09706030786037445}, {"image_id": 9, "category_id": 3, "bbox": [797.8114013671875, 7.9641265869140625, 817.423583984375, 956.0599822998047], "score": 0.07318729162216187}, {"image_id": 9, "category_id": 3, "bbox": [213.92724609375, 48.890892028808594, 423.554443359375, 578.9822769165039], "score": 0.05958142504096031}, {"image_id": 9, "category_id": 3, "bbox": [1719.8973388671875, 0.0, 200.1026611328125, 185.571533203125], "score": 0.05693536996841431}, {"image_id": 9, "category_id": 3, "bbox": [1343.64697265625, 274.53570556640625, 302.211181640625, 564.1097412109375], "score": 0.05418822169303894}, {"image_id": 9, "category_id": 3, "bbox": [1708.491943359375, 11.79532527923584, 211.508056640625, 279.36589908599854], "score": 0.05351770296692848}, {"image_id": 9, "category_id": 3, "bbox": [1003.2449951171875, 644.697021484375, 381.0859375, 435.302978515625], "score": 0.048480693250894547}, {"image_id": 9, "category_id": 3, "bbox": [965.11328125, 499.0561828613281, 317.498291015625, 269.0390319824219], "score": 0.04634943604469299}, {"image_id": 9, "category_id": 3, "bbox": [928.2544555664062, 156.98837280273438, 918.1035766601562, 890.0603332519531], "score": 0.04419582709670067}, {"image_id": 9, "category_id": 3, "bbox": [914.93701171875, 255.94549560546875, 310.1466064453125, 267.03265380859375], "score": 0.043005313724279404}, {"image_id": 9, "category_id": 3, "bbox": [961.3185424804688, 763.2428588867188, 322.10076904296875, 282.29425048828125], "score": 0.042763423174619675}, {"image_id": 9, "category_id": 3, "bbox": [1322.2069091796875, 425.1807556152344, 330.49365234375, 587.3246154785156], "score": 0.04076836258172989}, {"image_id": 9, "category_id": 3, "bbox": [866.0814208984375, 770.9434204101562, 324.0562744140625, 268.14495849609375], "score": 0.03884780406951904}, {"image_id": 9, "category_id": 3, "bbox": [871.9949340820312, 499.1190185546875, 319.68280029296875, 268.20281982421875], "score": 0.038521409034729004}, {"image_id": 9, "category_id": 3, "bbox": [1063.8782958984375, 500.34942626953125, 319.0821533203125, 266.35626220703125], "score": 0.03828959912061691}, {"image_id": 9, "category_id": 3, "bbox": [962.795654296875, 580.0164184570312, 320.888671875, 270.8056640625], "score": 0.03806180879473686}, {"image_id": 9, "category_id": 3, "bbox": [965.9172973632812, 688.5078125, 321.34600830078125, 267.49822998046875], "score": 0.03751419112086296}, {"image_id": 9, "category_id": 3, "bbox": [818.203857421875, 609.1388549804688, 323.619140625, 266.81390380859375], "score": 0.03473827242851257}, {"image_id": 9, "category_id": 3, "bbox": [0.0, 254.64950561523438, 1028.4212646484375, 825.3504943847656], "score": 0.03413067013025284}, {"image_id": 10, 
"category_id": 1, "bbox": [385.9270324707031, 324.050048828125, 282.5501403808594, 394.11688232421875], "score": 0.7760024070739746}, {"image_id": 10, "category_id": 1, "bbox": [540.668212890625, 333.24359130859375, 258.50018310546875, 375.416748046875], "score": 0.19947218894958496}, {"image_id": 10, "category_id": 1, "bbox": [925.8759765625, 164.4906005859375, 259.5208740234375, 437.4351806640625], "score": 0.15508656203746796}, {"image_id": 10, "category_id": 1, "bbox": [896.5825805664062, 233.64599609375, 251.89544677734375, 422.82183837890625], "score": 0.11535772681236267}, {"image_id": 10, "category_id": 1, "bbox": [167.9991455078125, 0.0, 274.2025146484375, 392.7406921386719], "score": 0.1003098413348198}, {"image_id": 10, "category_id": 1, "bbox": [295.12982177734375, 77.30337524414062, 564.5549926757812, 625.2829284667969], "score": 0.0944853276014328}, {"image_id": 10, "category_id": 1, "bbox": [1053.1385498046875, 0.0, 226.8614501953125, 408.8536682128906], "score": 0.0880342647433281}, {"image_id": 10, "category_id": 1, "bbox": [129.29159545898438, 17.112064361572266, 614.2249450683594, 638.8866539001465], "score": 0.07616733759641647}, {"image_id": 10, "category_id": 1, "bbox": [981.4067993164062, 149.6465606689453, 282.51519775390625, 406.32444763183594], "score": 0.06790372729301453}, {"image_id": 10, "category_id": 1, "bbox": [588.927490234375, 357.5108337402344, 278.10089111328125, 362.4891662597656], "score": 0.06367765367031097}, {"image_id": 10, "category_id": 1, "bbox": [547.1660766601562, 222.77423095703125, 247.4150390625, 384.09613037109375], "score": 0.061857882887125015}, {"image_id": 10, "category_id": 1, "bbox": [413.3970642089844, 454.6130676269531, 284.7623596191406, 265.3869323730469], "score": 0.05261210724711418}, {"image_id": 10, "category_id": 1, "bbox": [601.071044921875, 0.0, 318.7930908203125, 404.5834045410156], "score": 0.05111544951796532}, {"image_id": 10, "category_id": 1, "bbox": [374.9455261230469, 21.429279327392578, 637.8768005371094, 627.8414115905762], "score": 0.050975024700164795}, {"image_id": 10, "category_id": 1, "bbox": [0.0, 12.052104949951172, 507.2109069824219, 634.4641304016113], "score": 0.05048765242099762}, {"image_id": 10, "category_id": 1, "bbox": [873.0853271484375, 120.04520416259766, 271.3604736328125, 391.38741302490234], "score": 0.047349825501441956}, {"image_id": 10, "category_id": 1, "bbox": [511.6744384765625, 107.8736572265625, 651.984619140625, 590.64794921875], "score": 0.04630196467041969}, {"image_id": 10, "category_id": 1, "bbox": [296.5062561035156, 317.76446533203125, 570.8870544433594, 402.23553466796875], "score": 0.04519170522689819}, {"image_id": 10, "category_id": 1, "bbox": [131.18865966796875, 251.2709197998047, 627.6931762695312, 468.7290802001953], "score": 0.042626842856407166}, {"image_id": 10, "category_id": 1, "bbox": [726.9332885742188, 0.0, 289.17999267578125, 402.8565673828125], "score": 0.039718858897686005}, {"image_id": 10, "category_id": 1, "bbox": [266.43603515625, 0.0, 314.71697998046875, 396.3963623046875], "score": 0.035326410084962845}, {"image_id": 10, "category_id": 1, "bbox": [0.0, 242.7113037109375, 268.05377197265625, 426.6805419921875], "score": 0.03509239852428436}, {"image_id": 10, "category_id": 1, "bbox": [593.4929809570312, 266.5175476074219, 264.0191650390625, 372.2305603027344], "score": 0.03402499482035637}, {"image_id": 10, "category_id": 1, "bbox": [819.3353881835938, 170.89175415039062, 289.24700927734375, 408.2075500488281], "score": 0.03371281176805496}, 
{"image_id": 10, "category_id": 1, "bbox": [409.3370361328125, 224.3511199951172, 263.4180908203125, 396.61439514160156], "score": 0.03333447501063347}, {"image_id": 10, "category_id": 2, "bbox": [546.3654174804688, 303.92352294921875, 248.9910888671875, 383.94580078125], "score": 0.2735179662704468}, {"image_id": 10, "category_id": 2, "bbox": [583.8246459960938, 335.56396484375, 284.67669677734375, 370.19158935546875], "score": 0.20912976562976837}, {"image_id": 10, "category_id": 2, "bbox": [595.0691528320312, 226.331298828125, 267.4459228515625, 376.5029296875], "score": 0.13434021174907684}, {"image_id": 10, "category_id": 2, "bbox": [653.2823486328125, 304.7796936035156, 290.757568359375, 370.6663513183594], "score": 0.10644174367189407}, {"image_id": 10, "category_id": 2, "bbox": [393.56036376953125, 324.8939514160156, 279.01397705078125, 389.0417175292969], "score": 0.10093358159065247}, {"image_id": 10, "category_id": 2, "bbox": [541.4434814453125, 186.73233032226562, 262.8250732421875, 382.4510803222656], "score": 0.08487886935472488}, {"image_id": 10, "category_id": 2, "bbox": [475.76068115234375, 382.9862060546875, 296.59564208984375, 337.0137939453125], "score": 0.07808590680360794}, {"image_id": 10, "category_id": 2, "bbox": [313.19775390625, 303.53863525390625, 305.47637939453125, 401.08709716796875], "score": 0.07151831686496735}, {"image_id": 10, "category_id": 2, "bbox": [532.7481079101562, 460.3500671386719, 283.095947265625, 259.6499328613281], "score": 0.0599251352250576}, {"image_id": 10, "category_id": 2, "bbox": [720.739501953125, 324.8518371582031, 290.4185791015625, 394.0600891113281], "score": 0.05904442071914673}, {"image_id": 10, "category_id": 2, "bbox": [145.62957763671875, 81.5712890625, 593.2521362304688, 621.9876098632812], "score": 0.057571880519390106}, {"image_id": 10, "category_id": 2, "bbox": [655.0546264648438, 189.35739135742188, 294.45220947265625, 377.6946105957031], "score": 0.051007069647312164}, {"image_id": 10, "category_id": 2, "bbox": [413.3970642089844, 454.6130676269531, 284.7623596191406, 265.3869323730469], "score": 0.04888501763343811}, {"image_id": 10, "category_id": 2, "bbox": [719.6429443359375, 223.5335235595703, 307.9063720703125, 379.6269989013672], "score": 0.04792649671435356}, {"image_id": 10, "category_id": 2, "bbox": [808.1487426757812, 243.25546264648438, 300.05035400390625, 406.1888732910156], "score": 0.04698625206947327}, {"image_id": 10, "category_id": 2, "bbox": [841.21826171875, 433.8558044433594, 346.8741455078125, 286.1441955566406], "score": 0.04636036977171898}, {"image_id": 10, "category_id": 2, "bbox": [163.06480407714844, 0.0, 279.46620178222656, 350.0527648925781], "score": 0.042746249586343765}, {"image_id": 10, "category_id": 2, "bbox": [888.0396118164062, 196.14634704589844, 267.90057373046875, 435.2461700439453], "score": 0.04182962328195572}, {"image_id": 10, "category_id": 2, "bbox": [8.010650634765625, 13.366207122802734, 646.3991394042969, 645.4455604553223], "score": 0.04094677045941353}, {"image_id": 10, "category_id": 2, "bbox": [766.66650390625, 470.3379211425781, 342.76171875, 249.66207885742188], "score": 0.04075455293059349}, {"image_id": 10, "category_id": 2, "bbox": [655.5152587890625, 466.5755310058594, 313.570068359375, 253.42446899414062], "score": 0.038928091526031494}, {"image_id": 10, "category_id": 2, "bbox": [599.9642333984375, 99.83894348144531, 296.8204345703125, 407.8802947998047], "score": 0.03852409869432449}, {"image_id": 10, "category_id": 2, "bbox": [586.7642211914062, 
497.49005126953125, 314.47723388671875, 222.50994873046875], "score": 0.03837299719452858}, {"image_id": 10, "category_id": 2, "bbox": [580.2703247070312, 331.42413330078125, 213.051025390625, 182.840576171875], "score": 0.03828791528940201}, {"image_id": 10, "category_id": 2, "bbox": [259.59332275390625, 0.0, 330.77154541015625, 308.1333312988281], "score": 0.03817974403500557}, {"image_id": 10, "category_id": 2, "bbox": [899.0443725585938, 314.5574951171875, 229.11761474609375, 403.1707763671875], "score": 0.03816356882452965}, {"image_id": 10, "category_id": 2, "bbox": [406.15972900390625, 84.86114501953125, 572.6685180664062, 617.2863159179688], "score": 0.038102321326732635}, {"image_id": 10, "category_id": 2, "bbox": [0.0, 10.40127182006836, 377.7883605957031, 633.6902198791504], "score": 0.037101197987794876}, {"image_id": 10, "category_id": 2, "bbox": [245.32769775390625, 0.0, 687.6099243164062, 323.94171142578125], "score": 0.0368080697953701}, {"image_id": 10, "category_id": 2, "bbox": [169.52615356445312, 69.46321868896484, 265.7151184082031, 385.89920806884766], "score": 0.03624814748764038}, {"image_id": 10, "category_id": 2, "bbox": [5.98046875, 302.73248291015625, 673.01806640625, 417.26751708984375], "score": 0.03597955405712128}, {"image_id": 10, "category_id": 2, "bbox": [385.2830505371094, 220.0565185546875, 273.2179260253906, 412.21533203125], "score": 0.035833947360515594}, {"image_id": 10, "category_id": 2, "bbox": [218.34390258789062, 99.64046478271484, 275.3382873535156, 403.12061309814453], "score": 0.03581332415342331}, {"image_id": 10, "category_id": 2, "bbox": [246.862548828125, 18.228858947753906, 632.4512939453125, 640.2853012084961], "score": 0.03498157858848572}, {"image_id": 10, "category_id": 2, "bbox": [289.0177307128906, 453.0138854980469, 308.1336364746094, 266.9861145019531], "score": 0.034224096685647964}, {"image_id": 10, "category_id": 2, "bbox": [520.0684814453125, 0.0, 675.2303466796875, 321.32147216796875], "score": 0.033815085887908936}, {"image_id": 10, "category_id": 2, "bbox": [491.712890625, 110.0461654663086, 263.667724609375, 394.07178497314453], "score": 0.03378212824463844}, {"image_id": 10, "category_id": 2, "bbox": [271.82275390625, 65.51496124267578, 296.4315185546875, 401.2272262573242], "score": 0.03283767029643059}, {"image_id": 10, "category_id": 2, "bbox": [975.9761962890625, 438.9334716796875, 300.9637451171875, 281.0665283203125], "score": 0.03267626836895943}, {"image_id": 10, "category_id": 2, "bbox": [703.70361328125, 513.396728515625, 343.9830322265625, 206.603271484375], "score": 0.03238387405872345}, {"image_id": 10, "category_id": 2, "bbox": [94.28231811523438, 111.05289459228516, 279.3468017578125, 386.16963958740234], "score": 0.03236907348036766}, {"image_id": 10, "category_id": 2, "bbox": [511.6744384765625, 107.8736572265625, 651.984619140625, 590.64794921875], "score": 0.03228681907057762}, {"image_id": 10, "category_id": 3, "bbox": [540.668212890625, 333.24359130859375, 258.50018310546875, 375.416748046875], "score": 0.6437546014785767}, {"image_id": 10, "category_id": 3, "bbox": [547.1660766601562, 222.77423095703125, 247.4150390625, 384.09613037109375], "score": 0.3783878684043884}, {"image_id": 10, "category_id": 3, "bbox": [589.0225219726562, 304.77532958984375, 272.22076416015625, 377.93707275390625], "score": 0.3602408170700073}, {"image_id": 10, "category_id": 3, "bbox": [393.56036376953125, 324.8939514160156, 279.01397705078125, 389.0417175292969], "score": 0.21413560211658478}, {"image_id": 10, 
"category_id": 3, "bbox": [896.5825805664062, 233.64599609375, 251.89544677734375, 422.82183837890625], "score": 0.12692305445671082}, {"image_id": 10, "category_id": 3, "bbox": [650.2211303710938, 334.04638671875, 299.64117431640625, 375.28472900390625], "score": 0.11094558238983154}, {"image_id": 10, "category_id": 3, "bbox": [925.8759765625, 164.4906005859375, 259.5208740234375, 437.4351806640625], "score": 0.10031012445688248}, {"image_id": 10, "category_id": 3, "bbox": [596.3726196289062, 191.01513671875, 271.96246337890625, 374.33087158203125], "score": 0.07260051369667053}, {"image_id": 10, "category_id": 3, "bbox": [286.81256103515625, 185.58131408691406, 575.4508666992188, 534.4186859130859], "score": 0.06729491055011749}, {"image_id": 10, "category_id": 3, "bbox": [653.4993286132812, 227.69688415527344, 293.079833984375, 375.0379180908203], "score": 0.05955657735466957}, {"image_id": 10, "category_id": 3, "bbox": [582.9999389648438, 348.8243713378906, 210.28875732421875, 182.34310913085938], "score": 0.05799015983939171}, {"image_id": 10, "category_id": 3, "bbox": [406.15972900390625, 84.86114501953125, 572.6685180664062, 617.2863159179688], "score": 0.05770942568778992}, {"image_id": 10, "category_id": 3, "bbox": [478.7300109863281, 170.2832489013672, 669.0374450683594, 549.7167510986328], "score": 0.055481620132923126}, {"image_id": 10, "category_id": 3, "bbox": [131.18865966796875, 251.2709197998047, 627.6931762695312, 468.7290802001953], "score": 0.0513739250600338}, {"image_id": 10, "category_id": 3, "bbox": [646.0064086914062, 351.076171875, 210.6717529296875, 179.2471923828125], "score": 0.05114865303039551}, {"image_id": 10, "category_id": 3, "bbox": [720.9847412109375, 293.8246765136719, 293.2696533203125, 383.4001159667969], "score": 0.04970991238951683}, {"image_id": 10, "category_id": 3, "bbox": [167.9991455078125, 0.0, 274.2025146484375, 392.7406921386719], "score": 0.04955167695879936}, {"image_id": 10, "category_id": 3, "bbox": [465.2969970703125, 426.5863037109375, 296.56268310546875, 293.4136962890625], "score": 0.048306141048669815}, {"image_id": 10, "category_id": 3, "bbox": [358.7489929199219, 318.9707946777344, 640.1451721191406, 401.0292053222656], "score": 0.04665038734674454}, {"image_id": 10, "category_id": 3, "bbox": [608.1564331054688, 389.9068908691406, 215.21868896484375, 175.05685424804688], "score": 0.045425888150930405}, {"image_id": 10, "category_id": 3, "bbox": [617.8295288085938, 315.51422119140625, 208.84423828125, 179.56317138671875], "score": 0.04493549093604088}, {"image_id": 10, "category_id": 3, "bbox": [814.2109375, 209.64112854003906, 298.9959716796875, 404.5304412841797], "score": 0.042251359671354294}, {"image_id": 10, "category_id": 3, "bbox": [981.4067993164062, 149.6465606689453, 282.51519775390625, 406.32444763183594], "score": 0.04144832491874695}, {"image_id": 10, "category_id": 3, "bbox": [548.7759399414062, 316.4497375488281, 214.833740234375, 175.79885864257812], "score": 0.03731105104088783}, {"image_id": 10, "category_id": 3, "bbox": [129.29159545898438, 17.112064361572266, 614.2249450683594, 638.8866539001465], "score": 0.03558099642395973}, {"image_id": 10, "category_id": 3, "bbox": [516.31787109375, 354.08734130859375, 214.8306884765625, 172.2762451171875], "score": 0.034608546644449234}, {"image_id": 10, "category_id": 3, "bbox": [873.0853271484375, 120.04520416259766, 271.3604736328125, 391.38741302490234], "score": 0.034475311636924744}, {"image_id": 10, "category_id": 3, "bbox": [389.86376953125, 329.373046875, 
210.3167724609375, 183.5889892578125], "score": 0.03443215787410736}, {"image_id": 10, "category_id": 3, "bbox": [544.387451171875, 391.4739074707031, 219.92462158203125, 171.25869750976562], "score": 0.034017641097307205}, {"image_id": 10, "category_id": 3, "bbox": [934.135009765625, 387.96234130859375, 211.264892578125, 175.591064453125], "score": 0.03375770151615143}, {"image_id": 10, "category_id": 3, "bbox": [577.310546875, 423.4808654785156, 217.793701171875, 179.78134155273438], "score": 0.033644482493400574}, {"image_id": 10, "category_id": 3, "bbox": [925.55029296875, 309.7933654785156, 250.02587890625, 401.2548522949219], "score": 0.03245498239994049}, {"image_id": 10, "category_id": 3, "bbox": [640.8983764648438, 424.2222900390625, 217.37823486328125, 177.49951171875], "score": 0.03243618458509445}, {"image_id": 11, "category_id": 1, "bbox": [779.1370849609375, 46.5333137512207, 191.319580078125, 362.3571586608887], "score": 0.46955493092536926}, {"image_id": 11, "category_id": 1, "bbox": [718.7545166015625, 42.9426383972168, 206.7415771484375, 368.3843879699707], "score": 0.4012986719608307}, {"image_id": 11, "category_id": 1, "bbox": [794.8621215820312, 14.626184463500977, 249.45318603515625, 371.06342124938965], "score": 0.15162675082683563}, {"image_id": 11, "category_id": 1, "bbox": [704.9242553710938, 144.6870880126953, 218.36248779296875, 384.3347625732422], "score": 0.10272020846605301}, {"image_id": 11, "category_id": 1, "bbox": [892.0260620117188, 228.2760009765625, 265.25274658203125, 430.68914794921875], "score": 0.0765485092997551}, {"image_id": 11, "category_id": 1, "bbox": [189.1900634765625, 13.987346649169922, 768.3843994140625, 655.7635688781738], "score": 0.06816741824150085}, {"image_id": 11, "category_id": 1, "bbox": [0.0, 6.673713684082031, 674.0738525390625, 650.5119552612305], "score": 0.061500679701566696}, {"image_id": 11, "category_id": 1, "bbox": [0.0, 248.96502685546875, 281.60711669921875, 401.845947265625], "score": 0.04923025518655777}, {"image_id": 11, "category_id": 1, "bbox": [642.0755615234375, 0.0, 281.9366455078125, 361.0936279296875], "score": 0.044655412435531616}, {"image_id": 11, "category_id": 1, "bbox": [1068.2855224609375, 4.394926071166992, 211.7144775390625, 391.68362617492676], "score": 0.042452696710824966}, {"image_id": 11, "category_id": 1, "bbox": [798.2700805664062, 148.72113037109375, 255.89031982421875, 383.3824462890625], "score": 0.042280685156583786}, {"image_id": 11, "category_id": 1, "bbox": [742.6051025390625, 183.2581329345703, 248.4949951171875, 374.3478240966797], "score": 0.041502393782138824}, {"image_id": 11, "category_id": 1, "bbox": [164.8495635986328, 41.847713470458984, 281.13426208496094, 370.40768814086914], "score": 0.04064580053091049}, {"image_id": 11, "category_id": 1, "bbox": [871.5300903320312, 149.3250732421875, 268.93426513671875, 393.34625244140625], "score": 0.040580105036497116}, {"image_id": 11, "category_id": 1, "bbox": [369.7220458984375, 97.34040832519531, 675.3466796875, 603.3227386474609], "score": 0.03999977931380272}, {"image_id": 11, "category_id": 2, "bbox": [701.6385498046875, 192.95765686035156, 215.41363525390625, 358.34910583496094], "score": 0.16289815306663513}, {"image_id": 11, "category_id": 2, "bbox": [750.3140869140625, 144.40762329101562, 235.0313720703125, 386.3697204589844], "score": 0.14088527858257294}, {"image_id": 11, "category_id": 2, "bbox": [636.0598754882812, 226.2519989013672, 240.99615478515625, 369.95008850097656], "score": 0.1225067600607872}, {"image_id": 11, 
"category_id": 2, "bbox": [777.462158203125, 67.86182403564453, 196.4267578125, 382.82616424560547], "score": 0.10131408274173737}, {"image_id": 11, "category_id": 2, "bbox": [679.267578125, 295.8843994140625, 247.235595703125, 376.7857666015625], "score": 0.0990644097328186}, {"image_id": 11, "category_id": 2, "bbox": [738.406005859375, 256.9725036621094, 264.9801025390625, 380.0640563964844], "score": 0.09659185260534286}, {"image_id": 11, "category_id": 2, "bbox": [716.424072265625, 63.118343353271484, 208.61395263671875, 392.160099029541], "score": 0.09596394002437592}, {"image_id": 11, "category_id": 2, "bbox": [637.2542724609375, 111.1808853149414, 274.1754150390625, 383.6102828979492], "score": 0.09253831952810287}, {"image_id": 11, "category_id": 2, "bbox": [794.44091796875, 205.35617065429688, 297.3480224609375, 414.8065490722656], "score": 0.09230341017246246}, {"image_id": 11, "category_id": 2, "bbox": [547.1419677734375, 181.5594024658203, 301.903564453125, 384.2992401123047], "score": 0.08236567676067352}, {"image_id": 11, "category_id": 2, "bbox": [797.1911010742188, 117.08809661865234, 255.30755615234375, 380.62415313720703], "score": 0.07981717586517334}, {"image_id": 11, "category_id": 2, "bbox": [883.1527099609375, 193.2349853515625, 273.2110595703125, 440.92578125], "score": 0.07604651153087616}, {"image_id": 11, "category_id": 2, "bbox": [864.53271484375, 122.66622924804688, 263.9609375, 377.2273254394531], "score": 0.06478855758905411}, {"image_id": 11, "category_id": 2, "bbox": [944.2801513671875, 127.92854309082031, 224.709228515625, 368.57554626464844], "score": 0.05869733542203903}, {"image_id": 11, "category_id": 2, "bbox": [524.582763671875, 301.70855712890625, 337.21966552734375, 368.741455078125], "score": 0.05833056941628456}, {"image_id": 11, "category_id": 2, "bbox": [595.8057250976562, 330.87188720703125, 308.25885009765625, 381.8055419921875], "score": 0.05673690140247345}, {"image_id": 11, "category_id": 2, "bbox": [922.629150390625, 44.434288024902344, 266.5125732421875, 381.7965774536133], "score": 0.0534571073949337}, {"image_id": 11, "category_id": 2, "bbox": [340.5760192871094, 11.83835220336914, 695.0296936035156, 653.9527854919434], "score": 0.052788145840168}, {"image_id": 11, "category_id": 2, "bbox": [854.3621215820312, 7.072071075439453, 273.56536865234375, 380.6824150085449], "score": 0.051455944776535034}, {"image_id": 11, "category_id": 2, "bbox": [861.9510498046875, 434.7363586425781, 310.028076171875, 285.2636413574219], "score": 0.05019555985927582}, {"image_id": 11, "category_id": 2, "bbox": [1014.7501831054688, 46.854251861572266, 240.51300048828125, 382.5661277770996], "score": 0.0491897352039814}, {"image_id": 11, "category_id": 2, "bbox": [794.8621215820312, 14.626184463500977, 249.45318603515625, 371.06342124938965], "score": 0.04910200089216232}, {"image_id": 11, "category_id": 2, "bbox": [644.1312866210938, 7.890569686889648, 280.20880126953125, 384.236047744751], "score": 0.048697710037231445}, {"image_id": 11, "category_id": 2, "bbox": [466.9815979003906, 260.9541320800781, 323.8586730957031, 378.7810974121094], "score": 0.04696520045399666}, {"image_id": 11, "category_id": 2, "bbox": [165.14144897460938, 12.084428787231445, 284.78216552734375, 371.0463695526123], "score": 0.046600740402936935}, {"image_id": 11, "category_id": 2, "bbox": [896.5947875976562, 311.259765625, 236.25616455078125, 397.14373779296875], "score": 0.04639110341668129}, {"image_id": 11, "category_id": 2, "bbox": [804.5625610351562, 319.78692626953125, 
275.41754150390625, 388.90191650390625], "score": 0.04600277543067932}, {"image_id": 11, "category_id": 2, "bbox": [532.965087890625, 68.96965789794922, 322.23565673828125, 391.7481460571289], "score": 0.0453573502600193}, {"image_id": 11, "category_id": 2, "bbox": [738.5543212890625, 361.6784362792969, 270.0025634765625, 358.3215637207031], "score": 0.04480978846549988}, {"image_id": 11, "category_id": 2, "bbox": [390.4817199707031, 229.00918579101562, 641.6777038574219, 490.9908142089844], "score": 0.043070729821920395}, {"image_id": 11, "category_id": 2, "bbox": [675.709228515625, 331.6733703613281, 205.2822265625, 180.94284057617188], "score": 0.04277121275663376}, {"image_id": 11, "category_id": 2, "bbox": [63.5770263671875, 0.0, 360.7054138183594, 278.1374206542969], "score": 0.041975487023591995}, {"image_id": 11, "category_id": 2, "bbox": [1065.0152587890625, 127.50337219238281, 214.9847412109375, 369.5776824951172], "score": 0.04182436689734459}, {"image_id": 11, "category_id": 2, "bbox": [198.9335174560547, 75.20108795166016, 286.5465545654297, 372.5018539428711], "score": 0.04146486893296242}, {"image_id": 11, "category_id": 2, "bbox": [792.3540649414062, 466.0991516113281, 317.79547119140625, 253.90084838867188], "score": 0.040955591946840286}, {"image_id": 11, "category_id": 2, "bbox": [740.0891723632812, 332.0237121582031, 207.89190673828125, 179.24972534179688], "score": 0.04091639071702957}, {"image_id": 11, "category_id": 2, "bbox": [490.6361389160156, 183.82763671875, 267.8326110839844, 386.11199951171875], "score": 0.04090328514575958}, {"image_id": 11, "category_id": 2, "bbox": [214.27859497070312, 0.0, 736.2786560058594, 336.76885986328125], "score": 0.040332499891519547}, {"image_id": 11, "category_id": 2, "bbox": [84.68405151367188, 88.35015869140625, 747.4914855957031, 597.5097045898438], "score": 0.03995379060506821}, {"image_id": 11, "category_id": 2, "bbox": [554.252197265625, 110.8956527709961, 571.807861328125, 594.7103042602539], "score": 0.039806246757507324}, {"image_id": 11, "category_id": 2, "bbox": [770.737060546875, 0.0, 204.542236328125, 302.9700622558594], "score": 0.03977547585964203}, {"image_id": 11, "category_id": 2, "bbox": [261.4460144042969, 16.781702041625977, 307.7093811035156, 364.2231197357178], "score": 0.03905419632792473}, {"image_id": 11, "category_id": 2, "bbox": [921.0648193359375, 0.0, 302.3389892578125, 310.12713623046875], "score": 0.037891436368227005}, {"image_id": 11, "category_id": 2, "bbox": [643.369140625, 278.820556640625, 210.63873291015625, 181.83984375], "score": 0.037853922694921494}, {"image_id": 11, "category_id": 2, "bbox": [1068.2855224609375, 4.394926071166992, 211.7144775390625, 391.68362617492676], "score": 0.03759898990392685}, {"image_id": 11, "category_id": 2, "bbox": [254.7586669921875, 0.0, 333.7711181640625, 231.090087890625], "score": 0.037481509149074554}, {"image_id": 11, "category_id": 2, "bbox": [387.1231689453125, 328.9059143066406, 350.3863525390625, 391.0940856933594], "score": 0.037060823291540146}, {"image_id": 11, "category_id": 2, "bbox": [334.8962707519531, 0.0, 300.0085144042969, 315.9199523925781], "score": 0.03705647215247154}, {"image_id": 11, "category_id": 2, "bbox": [2.426177978515625, 0.0, 670.9460144042969, 332.9405822753906], "score": 0.0368431955575943}, {"image_id": 11, "category_id": 3, "bbox": [704.9242553710938, 144.6870880126953, 218.36248779296875, 384.3347625732422], "score": 0.3907516598701477}, {"image_id": 11, "category_id": 3, "bbox": [718.7545166015625, 42.9426383972168, 
206.7415771484375, 368.3843879699707], "score": 0.2658381462097168}, {"image_id": 11, "category_id": 3, "bbox": [779.1370849609375, 46.5333137512207, 191.319580078125, 362.3571586608887], "score": 0.22972407937049866}, {"image_id": 11, "category_id": 3, "bbox": [742.6051025390625, 183.2581329345703, 248.4949951171875, 374.3478240966797], "score": 0.21524298191070557}, {"image_id": 11, "category_id": 3, "bbox": [640.0628662109375, 183.9752197265625, 242.076171875, 373.30889892578125], "score": 0.18603840470314026}, {"image_id": 11, "category_id": 3, "bbox": [690.8125610351562, 260.6231384277344, 232.46807861328125, 373.3835754394531], "score": 0.12550030648708344}, {"image_id": 11, "category_id": 3, "bbox": [794.8621215820312, 14.626184463500977, 249.45318603515625, 371.06342124938965], "score": 0.12470848113298416}, {"image_id": 11, "category_id": 3, "bbox": [798.2700805664062, 148.72113037109375, 255.89031982421875, 383.3824462890625], "score": 0.11529411375522614}, {"image_id": 11, "category_id": 3, "bbox": [883.1527099609375, 193.2349853515625, 273.2110595703125, 440.92578125], "score": 0.10518863052129745}, {"image_id": 11, "category_id": 3, "bbox": [675.709228515625, 331.6733703613281, 205.2822265625, 180.94284057617188], "score": 0.07787305861711502}, {"image_id": 11, "category_id": 3, "bbox": [340.5760192871094, 11.83835220336914, 695.0296936035156, 653.9527854919434], "score": 0.07609529048204422}, {"image_id": 11, "category_id": 3, "bbox": [936.057861328125, 153.892333984375, 232.618896484375, 381.02203369140625], "score": 0.07046891748905182}, {"image_id": 11, "category_id": 3, "bbox": [164.8495635986328, 41.847713470458984, 281.13426208496094, 370.40768814086914], "score": 0.06933244317770004}, {"image_id": 11, "category_id": 3, "bbox": [740.0891723632812, 332.0237121582031, 207.89190673828125, 179.24972534179688], "score": 0.0688258484005928}, {"image_id": 11, "category_id": 3, "bbox": [736.1736450195312, 293.7282409667969, 266.8717041015625, 376.6335144042969], "score": 0.058899473398923874}, {"image_id": 11, "category_id": 3, "bbox": [634.2232055664062, 68.08182525634766, 286.32476806640625, 390.9419174194336], "score": 0.05779620632529259}, {"image_id": 11, "category_id": 3, "bbox": [795.5164794921875, 239.70907592773438, 303.896240234375, 411.5073547363281], "score": 0.054072167724370956}, {"image_id": 11, "category_id": 3, "bbox": [675.3837890625, 277.0909729003906, 210.74908447265625, 183.8741455078125], "score": 0.0539676658809185}, {"image_id": 11, "category_id": 3, "bbox": [610.82080078125, 330.2734680175781, 213.03466796875, 184.96249389648438], "score": 0.049239590764045715}, {"image_id": 11, "category_id": 3, "bbox": [58.561065673828125, 17.210838317871094, 764.6409606933594, 646.1836929321289], "score": 0.04885166138410568}, {"image_id": 11, "category_id": 3, "bbox": [707.2007446289062, 174.4210968017578, 214.147705078125, 171.28334045410156], "score": 0.047084078192710876}, {"image_id": 11, "category_id": 3, "bbox": [1060.800537109375, 87.31904602050781, 219.199462890625, 375.49375915527344], "score": 0.04677559807896614}, {"image_id": 11, "category_id": 3, "bbox": [772.1351318359375, 175.31048583984375, 213.7926025390625, 170.41290283203125], "score": 0.044128261506557465}, {"image_id": 11, "category_id": 3, "bbox": [709.325439453125, 375.8675842285156, 212.43896484375, 167.75241088867188], "score": 0.04319290071725845}, {"image_id": 11, "category_id": 3, "bbox": [737.7958984375, 280.828369140625, 213.96795654296875, 176.734619140625], "score": 
0.041627801954746246}, {"image_id": 11, "category_id": 3, "bbox": [738.9747314453125, 138.7034149169922, 215.20849609375, 172.5585479736328], "score": 0.04085201025009155}, {"image_id": 11, "category_id": 3, "bbox": [639.38427734375, 374.4417724609375, 216.11541748046875, 170.953125], "score": 0.040680766105651855}, {"image_id": 11, "category_id": 3, "bbox": [833.4569091796875, 314.86163330078125, 216.33154296875, 180.05511474609375], "score": 0.04059950262308121}, {"image_id": 11, "category_id": 3, "bbox": [864.53271484375, 122.66622924804688, 263.9609375, 377.2273254394531], "score": 0.04054669290781021}, {"image_id": 11, "category_id": 3, "bbox": [674.0697021484375, 208.67898559570312, 215.0179443359375, 176.3353271484375], "score": 0.0393824577331543}, {"image_id": 11, "category_id": 3, "bbox": [739.9862060546875, 211.44493103027344, 212.7518310546875, 168.02915954589844], "score": 0.03912266716361046}, {"image_id": 11, "category_id": 3, "bbox": [802.6735229492188, 209.39120483398438, 213.19818115234375, 170.81918334960938], "score": 0.038550619035959244}, {"image_id": 11, "category_id": 3, "bbox": [930.3099365234375, 315.4117126464844, 216.8546142578125, 178.4326171875], "score": 0.03851710632443428}, {"image_id": 11, "category_id": 3, "bbox": [230.3362579345703, 122.21784973144531, 211.18687438964844, 168.70310974121094], "score": 0.037478670477867126}, {"image_id": 11, "category_id": 3, "bbox": [774.1971435546875, 120.63484191894531, 213.400634765625, 171.6195526123047], "score": 0.03739669546484947}, {"image_id": 11, "category_id": 3, "bbox": [708.9439697265625, 80.83699035644531, 209.73614501953125, 176.11949157714844], "score": 0.03720122203230858}, {"image_id": 12, "category_id": 1, "bbox": [601.8173828125, 441.80859375, 352.7392578125, 588.391845703125], "score": 0.7737776637077332}, {"image_id": 12, "category_id": 1, "bbox": [690.0017700195312, 432.28436279296875, 434.51446533203125, 611.7064819335938], "score": 0.15456722676753998}, {"image_id": 12, "category_id": 1, "bbox": [795.7442016601562, 434.0965881347656, 450.25567626953125, 590.0771179199219], "score": 0.09207431226968765}, {"image_id": 12, "category_id": 1, "bbox": [1260.990966796875, 309.8225402832031, 354.7845458984375, 640.2354431152344], "score": 0.08276423066854477}, {"image_id": 12, "category_id": 1, "bbox": [597.9385986328125, 274.6987609863281, 323.5638427734375, 620.6542663574219], "score": 0.0779237449169159}, {"image_id": 12, "category_id": 1, "bbox": [912.1611328125, 318.13238525390625, 394.2205810546875, 618.91748046875], "score": 0.06211480870842934}, {"image_id": 12, "category_id": 1, "bbox": [1679.2716064453125, 0.0, 240.7283935546875, 218.42222595214844], "score": 0.057534243911504745}, {"image_id": 12, "category_id": 1, "bbox": [1312.9747314453125, 217.0583953857422, 332.7945556640625, 605.2796783447266], "score": 0.0565544031560421}, {"image_id": 12, "category_id": 1, "bbox": [185.7776641845703, 53.12977981567383, 417.35362243652344, 569.0011405944824], "score": 0.055418260395526886}, {"image_id": 12, "category_id": 1, "bbox": [446.2223815917969, 242.1199493408203, 884.1040344238281, 837.8800506591797], "score": 0.055144257843494415}, {"image_id": 12, "category_id": 1, "bbox": [0.0, 116.85978698730469, 759.5491333007812, 927.0108184814453], "score": 0.054688941687345505}, {"image_id": 12, "category_id": 1, "bbox": [0.0, 483.6027526855469, 405.07342529296875, 593.2090148925781], "score": 0.05444100499153137}, {"image_id": 12, "category_id": 1, "bbox": [843.2314453125, 21.448625564575195, 
852.6739501953125, 934.3459300994873], "score": 0.05167866498231888}, {"image_id": 12, "category_id": 1, "bbox": [473.16265869140625, 495.9841003417969, 435.8616943359375, 584.0158996582031], "score": 0.05142059177160263}, {"image_id": 12, "category_id": 1, "bbox": [190.19622802734375, 114.6008071899414, 1018.1845092773438, 923.8883285522461], "score": 0.05018168315291405}, {"image_id": 12, "category_id": 1, "bbox": [1329.0916748046875, 424.3099060058594, 307.5262451171875, 588.3484802246094], "score": 0.04826788976788521}, {"image_id": 12, "category_id": 1, "bbox": [611.2228393554688, 121.09336853027344, 918.0411987304688, 915.2234039306641], "score": 0.04745086655020714}, {"image_id": 12, "category_id": 1, "bbox": [393.0368347167969, 623.342529296875, 949.0783996582031, 456.657470703125], "score": 0.047011133283376694}, {"image_id": 12, "category_id": 1, "bbox": [0.0, 362.1678466796875, 982.2105712890625, 717.8321533203125], "score": 0.04126385599374771}, {"image_id": 12, "category_id": 1, "bbox": [511.59979248046875, 484.50262451171875, 1004.6022338867188, 595.4973754882812], "score": 0.036823246628046036}, {"image_id": 12, "category_id": 1, "bbox": [579.8350219726562, 610.5945434570312, 536.0455932617188, 469.40545654296875], "score": 0.036224059760570526}, {"image_id": 12, "category_id": 2, "bbox": [677.2225341796875, 618.4576416015625, 542.1937255859375, 461.5423583984375], "score": 0.43429118394851685}, {"image_id": 12, "category_id": 2, "bbox": [800.14404296875, 499.8337097167969, 458.5841064453125, 557.6874084472656], "score": 0.19421766698360443}, {"image_id": 12, "category_id": 2, "bbox": [882.911865234375, 619.9710693359375, 400.2708740234375, 460.0289306640625], "score": 0.17597444355487823}, {"image_id": 12, "category_id": 2, "bbox": [507.3101806640625, 662.9171142578125, 475.36602783203125, 417.0828857421875], "score": 0.15084023773670197}, {"image_id": 12, "category_id": 2, "bbox": [982.5060424804688, 575.0588989257812, 438.13128662109375, 504.94110107421875], "score": 0.11354175209999084}, {"image_id": 12, "category_id": 2, "bbox": [579.771240234375, 487.6549377441406, 375.6802978515625, 592.3450622558594], "score": 0.07835820317268372}, {"image_id": 12, "category_id": 2, "bbox": [1079.4449462890625, 498.6708068847656, 445.66552734375, 563.0434265136719], "score": 0.06683605164289474}, {"image_id": 12, "category_id": 2, "bbox": [1625.9425048828125, 0.0, 294.0574951171875, 210.53097534179688], "score": 0.06198965013027191}, {"image_id": 12, "category_id": 2, "bbox": [907.2764282226562, 372.9458923339844, 388.55059814453125, 604.3703918457031], "score": 0.061804696917533875}, {"image_id": 12, "category_id": 2, "bbox": [624.99658203125, 207.00054931640625, 836.52197265625, 872.9994506835938], "score": 0.06161037087440491}, {"image_id": 12, "category_id": 2, "bbox": [690.0017700195312, 432.28436279296875, 434.51446533203125, 611.7064819335938], "score": 0.05963847041130066}, {"image_id": 12, "category_id": 2, "bbox": [344.8124084472656, 717.1904296875, 452.2947692871094, 362.8095703125], "score": 0.05962981656193733}, {"image_id": 12, "category_id": 2, "bbox": [1261.99267578125, 481.24261474609375, 347.9508056640625, 579.4790649414062], "score": 0.05928800627589226}, {"image_id": 12, "category_id": 2, "bbox": [487.0001525878906, 398.3652648925781, 434.4202575683594, 592.3514099121094], "score": 0.050188399851322174}, {"image_id": 12, "category_id": 2, "bbox": [181.91403198242188, 0.0, 418.4808044433594, 549.3577270507812], "score": 0.0491303876042366}, {"image_id": 12, 
"category_id": 2, "bbox": [1254.795654296875, 259.99896240234375, 357.373779296875, 637.5044555664062], "score": 0.048936039209365845}, {"image_id": 12, "category_id": 2, "bbox": [1321.721923828125, 359.9975280761719, 321.6336669921875, 621.7731628417969], "score": 0.04854520410299301}, {"image_id": 12, "category_id": 2, "bbox": [202.12147521972656, 701.5454711914062, 504.83287048339844, 378.45452880859375], "score": 0.048521045595407486}, {"image_id": 12, "category_id": 2, "bbox": [982.26708984375, 0.0, 475.01904296875, 485.7975158691406], "score": 0.0475880429148674}, {"image_id": 12, "category_id": 2, "bbox": [288.1151123046875, 20.625036239624023, 473.2933349609375, 559.4637699127197], "score": 0.04747525230050087}, {"image_id": 12, "category_id": 2, "bbox": [29.005645751953125, 114.08293914794922, 938.0732727050781, 927.5656204223633], "score": 0.046152736991643906}, {"image_id": 12, "category_id": 2, "bbox": [849.8958129882812, 84.48847961425781, 418.47418212890625, 641.0147552490234], "score": 0.04485584795475006}, {"image_id": 12, "category_id": 2, "bbox": [0.0, 467.1936950683594, 1014.976318359375, 612.8063049316406], "score": 0.04368356242775917}, {"image_id": 12, "category_id": 2, "bbox": [845.8137817382812, 0.0, 398.67901611328125, 473.489990234375], "score": 0.04297690466046333}, {"image_id": 12, "category_id": 2, "bbox": [1005.12890625, 314.86309814453125, 387.755126953125, 615.4327392578125], "score": 0.042273759841918945}, {"image_id": 12, "category_id": 2, "bbox": [393.0368347167969, 623.342529296875, 949.0783996582031, 456.657470703125], "score": 0.04130273684859276}, {"image_id": 12, "category_id": 2, "bbox": [1310.0853271484375, 170.39955139160156, 344.1466064453125, 596.6940155029297], "score": 0.040926288813352585}, {"image_id": 12, "category_id": 2, "bbox": [700.6403198242188, 456.9290466308594, 986.4298706054688, 623.0709533691406], "score": 0.04040060192346573}, {"image_id": 12, "category_id": 2, "bbox": [1232.146728515625, 114.22003173828125, 367.6973876953125, 593.4590454101562], "score": 0.04023842141032219}, {"image_id": 12, "category_id": 2, "bbox": [188.04519653320312, 249.40577697753906, 1019.5010681152344, 830.5942230224609], "score": 0.0400986447930336}, {"image_id": 12, "category_id": 2, "bbox": [88.51013946533203, 586.7164916992188, 533.220085144043, 493.28350830078125], "score": 0.04009537026286125}, {"image_id": 12, "category_id": 2, "bbox": [1719.45703125, 0.0, 200.54296875, 179.42874145507812], "score": 0.03973136469721794}, {"image_id": 12, "category_id": 2, "bbox": [1123.740478515625, 315.3453674316406, 417.3787841796875, 611.2032775878906], "score": 0.039533648639917374}, {"image_id": 12, "category_id": 2, "bbox": [920.5845336914062, 202.11849975585938, 396.33465576171875, 625.9916076660156], "score": 0.03903505578637123}, {"image_id": 12, "category_id": 2, "bbox": [612.5509643554688, 316.9517822265625, 380.58306884765625, 659.3436889648438], "score": 0.038893818855285645}, {"image_id": 12, "category_id": 2, "bbox": [1002.6077880859375, 78.46993255615234, 393.4376220703125, 648.7083511352539], "score": 0.03821719065308571}, {"image_id": 12, "category_id": 2, "bbox": [966.3305053710938, 355.1768798828125, 953.5094604492188, 724.8231201171875], "score": 0.03808940574526787}, {"image_id": 12, "category_id": 2, "bbox": [1068.8001708984375, 0.0, 487.8548583984375, 365.6325988769531], "score": 0.037880945950746536}, {"image_id": 12, "category_id": 2, "bbox": [1305.4249267578125, 595.4780883789062, 404.833251953125, 484.52191162109375], "score": 
0.03753478452563286}, {"image_id": 12, "category_id": 2, "bbox": [803.8811645507812, 131.1870574951172, 909.0214233398438, 910.4693145751953], "score": 0.03737224265933037}, {"image_id": 12, "category_id": 2, "bbox": [179.79991149902344, 104.4603042602539, 393.7091827392578, 677.4410018920898], "score": 0.037081919610500336}, {"image_id": 12, "category_id": 2, "bbox": [827.199462890625, 268.530517578125, 426.826904296875, 617.1079711914062], "score": 0.03675014525651932}, {"image_id": 12, "category_id": 2, "bbox": [816.890625, 768.3983154296875, 329.675537109375, 272.21728515625], "score": 0.036394450813531876}, {"image_id": 12, "category_id": 2, "bbox": [894.88330078125, 0.0, 440.35302734375, 359.491943359375], "score": 0.035985685884952545}, {"image_id": 12, "category_id": 2, "bbox": [418.9116516113281, 13.831727027893066, 438.9847717285156, 663.4672842025757], "score": 0.035473212599754333}, {"image_id": 12, "category_id": 3, "bbox": [800.14404296875, 499.8337097167969, 458.5841064453125, 557.6874084472656], "score": 0.23963668942451477}, {"image_id": 12, "category_id": 3, "bbox": [677.2225341796875, 618.4576416015625, 542.1937255859375, 461.5423583984375], "score": 0.1703554093837738}, {"image_id": 12, "category_id": 3, "bbox": [882.911865234375, 619.9710693359375, 400.2708740234375, 460.0289306640625], "score": 0.11514583230018616}, {"image_id": 12, "category_id": 3, "bbox": [377.4486083984375, 379.268310546875, 978.9820556640625, 700.731689453125], "score": 0.10202396661043167}, {"image_id": 12, "category_id": 3, "bbox": [615.5186157226562, 469.5649108886719, 381.69561767578125, 599.1821594238281], "score": 0.09823818504810333}, {"image_id": 12, "category_id": 3, "bbox": [1264.7440185546875, 361.4244384765625, 350.9771728515625, 623.5684204101562], "score": 0.08054392039775848}, {"image_id": 12, "category_id": 3, "bbox": [1625.9425048828125, 0.0, 294.0574951171875, 210.53097534179688], "score": 0.06957439333200455}, {"image_id": 12, "category_id": 3, "bbox": [185.7776641845703, 53.12977981567383, 417.35362243652344, 569.0011405944824], "score": 0.06946542114019394}, {"image_id": 12, "category_id": 3, "bbox": [871.5813598632812, 657.9446411132812, 316.20318603515625, 276.5465087890625], "score": 0.0650390237569809}, {"image_id": 12, "category_id": 3, "bbox": [1312.9747314453125, 217.0583953857422, 332.7945556640625, 605.2796783447266], "score": 0.06484898924827576}, {"image_id": 12, "category_id": 3, "bbox": [907.2764282226562, 372.9458923339844, 388.55059814453125, 604.3703918457031], "score": 0.05923984944820404}, {"image_id": 12, "category_id": 3, "bbox": [970.4286499023438, 447.9388122558594, 452.95428466796875, 561.0511779785156], "score": 0.05717625841498375}, {"image_id": 12, "category_id": 3, "bbox": [771.9790649414062, 660.865234375, 320.62152099609375, 270.9241943359375], "score": 0.053519248962402344}, {"image_id": 12, "category_id": 3, "bbox": [626.3580932617188, 787.102783203125, 330.314697265625, 290.57958984375], "score": 0.05106491595506668}, {"image_id": 12, "category_id": 3, "bbox": [920.55029296875, 610.0082397460938, 318.088134765625, 267.2923583984375], "score": 0.05062280222773552}, {"image_id": 12, "category_id": 3, "bbox": [964.7506713867188, 659.5056762695312, 318.47186279296875, 270.42474365234375], "score": 0.05022395774722099}, {"image_id": 12, "category_id": 3, "bbox": [479.8090515136719, 818.4590454101562, 326.9645080566406, 261.54095458984375], "score": 0.04975845292210579}, {"image_id": 12, "category_id": 3, "bbox": [1326.694580078125, 
477.92340087890625, 313.7740478515625, 578.5661010742188], "score": 0.047096725553274155}, {"image_id": 12, "category_id": 3, "bbox": [1719.45703125, 0.0, 200.54296875, 179.42874145507812], "score": 0.04660116136074066}, {"image_id": 12, "category_id": 3, "bbox": [816.890625, 768.3983154296875, 329.675537109375, 272.21728515625], "score": 0.04621041566133499}, {"image_id": 12, "category_id": 3, "bbox": [12.832992553710938, 258.3187255859375, 979.1037139892578, 821.6812744140625], "score": 0.04612063989043236}, {"image_id": 12, "category_id": 3, "bbox": [914.6484375, 715.2711791992188, 326.44970703125, 274.8294677734375], "score": 0.04600612074136734}, {"image_id": 12, "category_id": 3, "bbox": [819.3697509765625, 610.6829833984375, 322.9691162109375, 268.1749267578125], "score": 0.044232238084077835}, {"image_id": 12, "category_id": 3, "bbox": [525.1864013671875, 769.0679931640625, 333.10546875, 274.8170166015625], "score": 0.04334491491317749}, {"image_id": 12, "category_id": 3, "bbox": [507.3101806640625, 662.9171142578125, 475.36602783203125, 417.0828857421875], "score": 0.04221775755286217}, {"image_id": 12, "category_id": 3, "bbox": [383.2598876953125, 797.5326538085938, 330.9185791015625, 269.98736572265625], "score": 0.04153613746166229}, {"image_id": 12, "category_id": 3, "bbox": [137.39254760742188, 623.892578125, 1089.6866760253906, 456.107421875], "score": 0.04133329540491104}, {"image_id": 12, "category_id": 3, "bbox": [690.0017700195312, 432.28436279296875, 434.51446533203125, 611.7064819335938], "score": 0.040167950093746185}, {"image_id": 12, "category_id": 3, "bbox": [1013.5076904296875, 743.6345825195312, 322.8565673828125, 269.2882080078125], "score": 0.03909161314368248}, {"image_id": 12, "category_id": 3, "bbox": [714.56982421875, 744.7647094726562, 334.3826904296875, 265.28460693359375], "score": 0.03857181593775749}, {"image_id": 12, "category_id": 3, "bbox": [569.2018432617188, 875.9876708984375, 338.492431640625, 204.0123291015625], "score": 0.03760620206594467}, {"image_id": 12, "category_id": 3, "bbox": [717.5430297851562, 827.5550537109375, 334.46417236328125, 252.4449462890625], "score": 0.037374671548604965}, {"image_id": 12, "category_id": 3, "bbox": [911.6328735351562, 796.5556030273438, 330.31158447265625, 272.57733154296875], "score": 0.036932870745658875}, {"image_id": 12, "category_id": 3, "bbox": [623.31884765625, 716.4613647460938, 330.73626708984375, 269.55682373046875], "score": 0.03544342517852783}, {"image_id": 13, "category_id": 1, "bbox": [1063.9581298828125, 145.32586669921875, 302.607177734375, 656.3079223632812], "score": 0.693933367729187}, {"image_id": 13, "category_id": 1, "bbox": [1104.3973388671875, 167.74453735351562, 404.55029296875, 683.2862854003906], "score": 0.5187339186668396}, {"image_id": 13, "category_id": 1, "bbox": [802.1742553710938, 362.2489013671875, 391.48614501953125, 568.2369995117188], "score": 0.21191343665122986}, {"image_id": 13, "category_id": 1, "bbox": [1037.00341796875, 342.51873779296875, 344.5321044921875, 617.31884765625], "score": 0.20289115607738495}, {"image_id": 13, "category_id": 1, "bbox": [869.2000732421875, 274.2851257324219, 459.4012451171875, 591.2498474121094], "score": 0.13348454236984253}, {"image_id": 13, "category_id": 1, "bbox": [1135.52880859375, 80.04851531982422, 344.5068359375, 605.3788528442383], "score": 0.09275040030479431}, {"image_id": 13, "category_id": 1, "bbox": [6.7974700927734375, 27.61226463317871, 952.6165924072266, 959.048318862915], "score": 0.075650155544281}, {"image_id": 13, 
"category_id": 1, "bbox": [827.3930053710938, 171.8137664794922, 382.22955322265625, 605.0484771728516], "score": 0.06332091987133026}, {"image_id": 13, "category_id": 1, "bbox": [1068.3350830078125, 19.06878089904785, 299.3763427734375, 602.2190608978271], "score": 0.05526883155107498}, {"image_id": 13, "category_id": 1, "bbox": [1678.7744140625, 0.0, 241.2255859375, 211.556640625], "score": 0.04321129247546196}, {"image_id": 13, "category_id": 1, "bbox": [186.60585021972656, 43.96826171875, 412.2302703857422, 594.4461669921875], "score": 0.04306570813059807}, {"image_id": 13, "category_id": 1, "bbox": [970.5655517578125, 71.77364349365234, 344.27978515625, 600.4040908813477], "score": 0.04286087304353714}, {"image_id": 13, "category_id": 1, "bbox": [331.7336730957031, 272.4233703613281, 1063.9491882324219, 807.5766296386719], "score": 0.03910084813833237}, {"image_id": 13, "category_id": 1, "bbox": [804.0335693359375, 267.7041015625, 905.298583984375, 812.2958984375], "score": 0.03688674047589302}, {"image_id": 13, "category_id": 2, "bbox": [776.15185546875, 322.7688903808594, 403.24755859375, 565.0983581542969], "score": 0.2916705310344696}, {"image_id": 13, "category_id": 2, "bbox": [815.7804565429688, 437.78851318359375, 383.99725341796875, 642.2114868164062], "score": 0.17047597467899323}, {"image_id": 13, "category_id": 2, "bbox": [864.4147338867188, 321.6148376464844, 439.20648193359375, 568.7245178222656], "score": 0.16715842485427856}, {"image_id": 13, "category_id": 2, "bbox": [827.3930053710938, 171.8137664794922, 382.22955322265625, 605.0484771728516], "score": 0.1279425173997879}, {"image_id": 13, "category_id": 2, "bbox": [652.2581787109375, 309.71124267578125, 440.6287841796875, 585.625732421875], "score": 0.11624691635370255}, {"image_id": 13, "category_id": 2, "bbox": [1063.0201416015625, 171.90008544921875, 308.406494140625, 677.6968994140625], "score": 0.09191886335611343}, {"image_id": 13, "category_id": 2, "bbox": [1037.00341796875, 342.51873779296875, 344.5321044921875, 617.31884765625], "score": 0.08086909353733063}, {"image_id": 13, "category_id": 2, "bbox": [888.8023681640625, 505.1412353515625, 442.7354736328125, 574.8587646484375], "score": 0.07968111336231232}, {"image_id": 13, "category_id": 2, "bbox": [865.5496215820312, 21.889760971069336, 346.43609619140625, 562.2301731109619], "score": 0.07933343946933746}, {"image_id": 13, "category_id": 2, "bbox": [751.5243530273438, 123.72441864013672, 398.92620849609375, 570.591194152832], "score": 0.07890597730875015}, {"image_id": 13, "category_id": 2, "bbox": [994.81787109375, 445.7964172363281, 417.281494140625, 634.2035827636719], "score": 0.06461313366889954}, {"image_id": 13, "category_id": 2, "bbox": [653.468994140625, 463.0584716796875, 418.27783203125, 604.4945068359375], "score": 0.06278488785028458}, {"image_id": 13, "category_id": 2, "bbox": [741.0648193359375, 0.0, 406.04248046875, 525.6809692382812], "score": 0.05975921079516411}, {"image_id": 13, "category_id": 2, "bbox": [627.1620483398438, 175.600830078125, 449.10101318359375, 576.8445434570312], "score": 0.05956937372684479}, {"image_id": 13, "category_id": 2, "bbox": [1104.3973388671875, 167.74453735351562, 404.55029296875, 683.2862854003906], "score": 0.05729032680392265}, {"image_id": 13, "category_id": 2, "bbox": [970.5655517578125, 71.77364349365234, 344.27978515625, 600.4040908813477], "score": 0.05438084900379181}, {"image_id": 13, "category_id": 2, "bbox": [801.876708984375, 632.88232421875, 476.611572265625, 447.11767578125], "score": 
0.04965537041425705}, {"image_id": 13, "category_id": 2, "bbox": [1068.3350830078125, 19.06878089904785, 299.3763427734375, 602.2190608978271], "score": 0.04887968674302101}, {"image_id": 13, "category_id": 2, "bbox": [198.2449951171875, 336.5740661621094, 505.79022216796875, 575.8031311035156], "score": 0.04592747613787651}, {"image_id": 13, "category_id": 2, "bbox": [1625.822021484375, 0.0, 294.177978515625, 199.13233947753906], "score": 0.0458730012178421}, {"image_id": 13, "category_id": 2, "bbox": [530.0716552734375, 361.0831604003906, 448.9189453125, 601.9794006347656], "score": 0.04570721089839935}, {"image_id": 13, "category_id": 2, "bbox": [625.42822265625, 10.96102523803711, 415.9625244140625, 579.0377540588379], "score": 0.045351505279541016}, {"image_id": 13, "category_id": 2, "bbox": [936.85546875, 0.0, 425.9803466796875, 458.962646484375], "score": 0.04510985687375069}, {"image_id": 13, "category_id": 2, "bbox": [103.81562805175781, 288.9329833984375, 487.6060028076172, 561.3019409179688], "score": 0.04462030902504921}, {"image_id": 13, "category_id": 2, "bbox": [328.2344055175781, 328.0498352050781, 1056.6026306152344, 751.9501647949219], "score": 0.043074917048215866}, {"image_id": 13, "category_id": 2, "bbox": [680.9085083007812, 695.6690673828125, 524.4906616210938, 384.3309326171875], "score": 0.04272403195500374}, {"image_id": 13, "category_id": 2, "bbox": [993.3001098632812, 695.5779418945312, 452.17437744140625, 384.42205810546875], "score": 0.04264531284570694}, {"image_id": 13, "category_id": 2, "bbox": [109.85691833496094, 42.091556549072266, 1092.5318756103516, 930.3917198181152], "score": 0.04230321943759918}, {"image_id": 13, "category_id": 2, "bbox": [519.4819946289062, 230.5518798828125, 456.98529052734375, 563.0648803710938], "score": 0.042205940932035446}, {"image_id": 13, "category_id": 2, "bbox": [870.0943603515625, 162.03904724121094, 315.5721435546875, 239.8847198486328], "score": 0.041485123336315155}, {"image_id": 13, "category_id": 2, "bbox": [0.0, 118.55140686035156, 797.3106079101562, 919.7321624755859], "score": 0.04018193483352661}, {"image_id": 13, "category_id": 2, "bbox": [1094.5074462890625, 511.6956481933594, 425.8875732421875, 568.3043518066406], "score": 0.04000389948487282}, {"image_id": 13, "category_id": 2, "bbox": [221.330078125, 220.64906311035156, 440.5894775390625, 577.0656585693359], "score": 0.039583079516887665}, {"image_id": 13, "category_id": 2, "bbox": [0.0, 407.6270446777344, 352.78076171875, 565.4349670410156], "score": 0.039548151195049286}, {"image_id": 13, "category_id": 2, "bbox": [1135.52880859375, 80.04851531982422, 344.5068359375, 605.3788528442383], "score": 0.03870926797389984}, {"image_id": 13, "category_id": 2, "bbox": [347.78521728515625, 0.0, 1048.4440307617188, 826.0125122070312], "score": 0.038249023258686066}, {"image_id": 13, "category_id": 2, "bbox": [1206.0218505859375, 453.54302978515625, 408.974365234375, 624.2979125976562], "score": 0.038235992193222046}, {"image_id": 13, "category_id": 2, "bbox": [951.1736450195312, 336.38824462890625, 968.8263549804688, 743.6117553710938], "score": 0.0377960130572319}, {"image_id": 13, "category_id": 2, "bbox": [921.0635986328125, 211.87229919433594, 317.706298828125, 247.62889099121094], "score": 0.03719158098101616}, {"image_id": 13, "category_id": 2, "bbox": [106.42570495605469, 419.9749755859375, 483.07740783691406, 636.43310546875], "score": 0.03715597093105316}, {"image_id": 13, "category_id": 2, "bbox": [811.1082153320312, 0.0, 461.07501220703125, 
409.7390441894531], "score": 0.03706620633602142}, {"image_id": 13, "category_id": 2, "bbox": [488.21539306640625, 681.0630493164062, 531.3453979492188, 398.93695068359375], "score": 0.03690110519528389}, {"image_id": 13, "category_id": 2, "bbox": [300.2687072753906, 0.0, 455.7886047363281, 552.0108642578125], "score": 0.03643253445625305}, {"image_id": 13, "category_id": 2, "bbox": [36.12345886230469, 0.0, 911.4264678955078, 495.3916015625], "score": 0.036120083183050156}, {"image_id": 13, "category_id": 2, "bbox": [20.6890869140625, 349.4113464355469, 436.0954284667969, 557.2390441894531], "score": 0.036048002541065216}, {"image_id": 13, "category_id": 3, "bbox": [799.7197875976562, 285.7389221191406, 404.77447509765625, 561.6141052246094], "score": 0.734651505947113}, {"image_id": 13, "category_id": 3, "bbox": [864.4147338867188, 321.6148376464844, 439.20648193359375, 568.7245178222656], "score": 0.4007243812084198}, {"image_id": 13, "category_id": 3, "bbox": [813.2650146484375, 395.2685546875, 377.86572265625, 610.4583740234375], "score": 0.39527878165245056}, {"image_id": 13, "category_id": 3, "bbox": [1051.9691162109375, 289.8795166015625, 322.8779296875, 626.1455078125], "score": 0.3111106753349304}, {"image_id": 13, "category_id": 3, "bbox": [652.2581787109375, 309.71124267578125, 440.6287841796875, 585.625732421875], "score": 0.15636461973190308}, {"image_id": 13, "category_id": 3, "bbox": [1066.766357421875, 120.82662963867188, 295.164306640625, 619.3522033691406], "score": 0.11640317738056183}, {"image_id": 13, "category_id": 3, "bbox": [1097.801513671875, 225.07484436035156, 403.6663818359375, 655.0750579833984], "score": 0.11478652060031891}, {"image_id": 13, "category_id": 3, "bbox": [865.5496215820312, 21.889760971069336, 346.43609619140625, 562.2301731109619], "score": 0.10052988678216934}, {"image_id": 13, "category_id": 3, "bbox": [751.5243530273438, 123.72441864013672, 398.92620849609375, 570.591194152832], "score": 0.08639268577098846}, {"image_id": 13, "category_id": 3, "bbox": [109.85691833496094, 42.091556549072266, 1092.5318756103516, 930.3917198181152], "score": 0.06025842949748039}, {"image_id": 13, "category_id": 3, "bbox": [880.58349609375, 441.35174560546875, 423.017333984375, 638.6482543945312], "score": 0.05747906491160393}, {"image_id": 13, "category_id": 3, "bbox": [827.0574340820312, 518.0526123046875, 318.10748291015625, 284.78961181640625], "score": 0.05658680573105812}, {"image_id": 13, "category_id": 3, "bbox": [964.5520629882812, 494.5733947753906, 317.62408447265625, 279.5555725097656], "score": 0.055759135633707047}, {"image_id": 13, "category_id": 3, "bbox": [871.9427490234375, 131.6931610107422, 314.0518798828125, 247.0775909423828], "score": 0.055057208985090256}, {"image_id": 13, "category_id": 3, "bbox": [872.2824096679688, 466.1983337402344, 313.25506591796875, 282.0885925292969], "score": 0.054581716656684875}, {"image_id": 13, "category_id": 3, "bbox": [1013.0321044921875, 439.86474609375, 317.649658203125, 280.65234375], "score": 0.054409172385931015}, {"image_id": 13, "category_id": 3, "bbox": [819.7985229492188, 598.328857421875, 327.87957763671875, 289.14697265625], "score": 0.050125475972890854}, {"image_id": 13, "category_id": 3, "bbox": [875.2177734375, 309.1549987792969, 316.8480224609375, 274.6913757324219], "score": 0.04915712773799896}, {"image_id": 13, "category_id": 3, "bbox": [969.495849609375, 309.6170349121094, 316.936279296875, 271.1878356933594], "score": 0.047654904425144196}, {"image_id": 13, "category_id": 3, "bbox": 
[970.8292236328125, 133.27456665039062, 312.84326171875, 245.06829833984375], "score": 0.047415316104888916}, {"image_id": 13, "category_id": 3, "bbox": [823.40283203125, 416.49737548828125, 318.9549560546875, 275.942138671875], "score": 0.046849701553583145}, {"image_id": 13, "category_id": 3, "bbox": [921.0635986328125, 211.87229919433594, 317.706298828125, 247.62889099121094], "score": 0.046315379440784454}, {"image_id": 13, "category_id": 3, "bbox": [721.1943969726562, 627.8460083007812, 327.01727294921875, 286.1239013671875], "score": 0.04601248726248741}, {"image_id": 13, "category_id": 3, "bbox": [1678.7744140625, 0.0, 241.2255859375, 211.556640625], "score": 0.04590589925646782}, {"image_id": 13, "category_id": 3, "bbox": [1061.1041259765625, 385.0138244628906, 319.6937255859375, 282.2316589355469], "score": 0.045886747539043427}, {"image_id": 13, "category_id": 3, "bbox": [918.1514892578125, 417.6246032714844, 319.4979248046875, 272.9564514160156], "score": 0.044349633157253265}, {"image_id": 13, "category_id": 3, "bbox": [1106.4967041015625, 441.0483703613281, 322.7528076171875, 276.1659240722656], "score": 0.04352360591292381}, {"image_id": 13, "category_id": 3, "bbox": [1058.5870361328125, 521.8656005859375, 324.214111328125, 278.6434326171875], "score": 0.04345933720469475}, {"image_id": 13, "category_id": 3, "bbox": [915.275634765625, 552.992431640625, 323.239013671875, 271.82623291015625], "score": 0.0426366813480854}, {"image_id": 13, "category_id": 3, "bbox": [723.626220703125, 551.8345336914062, 329.32373046875, 275.21441650390625], "score": 0.04158239811658859}, {"image_id": 13, "category_id": 3, "bbox": [1061.962646484375, 305.4801940917969, 317.0738525390625, 278.9216003417969], "score": 0.04139864444732666}, {"image_id": 13, "category_id": 3, "bbox": [972.0774536132812, 233.40576171875, 315.55682373046875, 262.141845703125], "score": 0.04138676077127457}, {"image_id": 13, "category_id": 3, "bbox": [0.0, 259.97381591796875, 1023.9781494140625, 820.0261840820312], "score": 0.04062343016266823}, {"image_id": 13, "category_id": 3, "bbox": [776.1332397460938, 471.5803527832031, 325.05133056640625, 272.6305236816406], "score": 0.040071140974760056}, {"image_id": 13, "category_id": 3, "bbox": [776.4819946289062, 310.1250915527344, 319.89422607421875, 270.8674621582031], "score": 0.03994647413492203}, {"image_id": 13, "category_id": 3, "bbox": [331.7336730957031, 272.4233703613281, 1063.9491882324219, 807.5766296386719], "score": 0.039904747158288956}, {"image_id": 13, "category_id": 3, "bbox": [920.6629028320312, 76.89158630371094, 317.14202880859375, 249.1602020263672], "score": 0.03905251622200012}, {"image_id": 13, "category_id": 3, "bbox": [960.7467041015625, 19.523099899291992, 341.85498046875, 583.148775100708], "score": 0.038521070033311844}, {"image_id": 13, "category_id": 3, "bbox": [965.9035034179688, 607.0859375, 324.82574462890625, 275.560302734375], "score": 0.03841414675116539}, {"image_id": 13, "category_id": 3, "bbox": [1015.0054321289062, 73.39728546142578, 318.92779541015625, 253.82772064208984], "score": 0.03744347393512726}, {"image_id": 13, "category_id": 3, "bbox": [1012.144775390625, 184.5445556640625, 324.7587890625, 251.29629516601562], "score": 0.036381661891937256}, {"image_id": 14, "category_id": 1, "bbox": [569.6050415039062, 505.26483154296875, 433.4776611328125, 564.8944702148438], "score": 0.7492919564247131}, {"image_id": 14, "category_id": 1, "bbox": [586.276611328125, 242.8451385498047, 290.046630859375, 548.0461578369141], "score": 
0.5230174660682678}, {"image_id": 14, "category_id": 1, "bbox": [499.3105163574219, 188.3776397705078, 347.8390808105469, 548.2425994873047], "score": 0.2985485792160034}, {"image_id": 14, "category_id": 1, "bbox": [486.28668212890625, 290.1344299316406, 377.04534912109375, 646.4612121582031], "score": 0.20212659239768982}, {"image_id": 14, "category_id": 1, "bbox": [606.1704711914062, 187.20294189453125, 399.9027099609375, 550.1322021484375], "score": 0.15105919539928436}, {"image_id": 14, "category_id": 1, "bbox": [201.04409790039062, 52.110755920410156, 390.5030212402344, 573.6294784545898], "score": 0.12905052304267883}, {"image_id": 14, "category_id": 1, "bbox": [930.3250732421875, 504.1339416503906, 343.957275390625, 544.2066345214844], "score": 0.11969044059515}, {"image_id": 14, "category_id": 1, "bbox": [1248.7017822265625, 299.7359619140625, 361.0047607421875, 643.602294921875], "score": 0.10032922774553299}, {"image_id": 14, "category_id": 1, "bbox": [1257.797607421875, 484.0981140136719, 343.3253173828125, 595.9018859863281], "score": 0.08988887816667557}, {"image_id": 14, "category_id": 1, "bbox": [613.4051513671875, 300.8916931152344, 399.87652587890625, 637.0740051269531], "score": 0.08370402455329895}, {"image_id": 14, "category_id": 1, "bbox": [4.5916900634765625, 26.738800048828125, 975.5864105224609, 966.4670104980469], "score": 0.07621654123067856}, {"image_id": 14, "category_id": 1, "bbox": [816.2017822265625, 31.955692291259766, 905.6026611328125, 964.5629844665527], "score": 0.06484781205654144}, {"image_id": 14, "category_id": 1, "bbox": [378.3842468261719, 120.2198486328125, 987.8415832519531, 920.9013671875], "score": 0.06186004355549812}, {"image_id": 14, "category_id": 1, "bbox": [341.9305114746094, 468.6282043457031, 1020.5469055175781, 611.3717956542969], "score": 0.060048963874578476}, {"image_id": 14, "category_id": 1, "bbox": [909.0184936523438, 0.0, 438.06170654296875, 597.818603515625], "score": 0.058257874101400375}, {"image_id": 14, "category_id": 1, "bbox": [0.0, 488.86273193359375, 357.4452209472656, 583.3659057617188], "score": 0.05602836608886719}, {"image_id": 14, "category_id": 1, "bbox": [534.5953979492188, 0.0, 366.6973876953125, 556.70166015625], "score": 0.051533058285713196}, {"image_id": 14, "category_id": 1, "bbox": [1679.528076171875, 0.0, 240.471923828125, 213.98570251464844], "score": 0.04809180274605751}, {"image_id": 14, "category_id": 1, "bbox": [0.0, 118.50505828857422, 596.1981811523438, 932.0658645629883], "score": 0.04790490120649338}, {"image_id": 14, "category_id": 1, "bbox": [1309.2427978515625, 199.88589477539062, 341.6549072265625, 650.6334533691406], "score": 0.045140162110328674}, {"image_id": 14, "category_id": 1, "bbox": [428.5696105957031, 0.0, 414.4690856933594, 628.3800048828125], "score": 0.04498991370201111}, {"image_id": 14, "category_id": 1, "bbox": [362.0970764160156, 225.2012176513672, 452.3360900878906, 581.1761627197266], "score": 0.04318368062376976}, {"image_id": 14, "category_id": 1, "bbox": [1047.6607666015625, 144.0976104736328, 801.789794921875, 902.4178924560547], "score": 0.04044411703944206}, {"image_id": 14, "category_id": 1, "bbox": [14.189163208007812, 599.4202270507812, 479.8469696044922, 480.57977294921875], "score": 0.038449373096227646}, {"image_id": 14, "category_id": 2, "bbox": [932.5823364257812, 458.2019348144531, 341.42730712890625, 548.5691833496094], "score": 0.3290599584579468}, {"image_id": 14, "category_id": 2, "bbox": [963.0745239257812, 507.1696472167969, 440.41534423828125, 
540.7639465332031], "score": 0.15717020630836487}, {"image_id": 14, "category_id": 2, "bbox": [927.1651611328125, 630.3759765625, 346.1273193359375, 449.6240234375], "score": 0.14874255657196045}, {"image_id": 14, "category_id": 2, "bbox": [571.4718627929688, 447.808349609375, 423.5208740234375, 609.8187255859375], "score": 0.14451724290847778}, {"image_id": 14, "category_id": 2, "bbox": [460.0724792480469, 504.81268310546875, 464.7417907714844, 567.0371704101562], "score": 0.10604557394981384}, {"image_id": 14, "category_id": 2, "bbox": [700.8953247070312, 408.8575439453125, 401.40435791015625, 665.3037109375], "score": 0.09301237761974335}, {"image_id": 14, "category_id": 2, "bbox": [818.9024047851562, 339.03314208984375, 411.27593994140625, 570.3923950195312], "score": 0.07857807725667953}, {"image_id": 14, "category_id": 2, "bbox": [509.8990173339844, 694.3297119140625, 500.7943420410156, 385.6702880859375], "score": 0.0662052184343338}, {"image_id": 14, "category_id": 2, "bbox": [1094.0546875, 492.6336669921875, 443.995849609375, 579.8310546875], "score": 0.0613977313041687}, {"image_id": 14, "category_id": 2, "bbox": [579.7796630859375, 286.9581604003906, 322.65435791015625, 658.1825256347656], "score": 0.05699366703629494}, {"image_id": 14, "category_id": 2, "bbox": [378.21270751953125, 774.2218627929688, 537.3538208007812, 305.77813720703125], "score": 0.05366352200508118}, {"image_id": 14, "category_id": 2, "bbox": [587.6294555664062, 191.35556030273438, 280.4974365234375, 543.1881408691406], "score": 0.050795480608940125}, {"image_id": 14, "category_id": 2, "bbox": [341.9305114746094, 468.6282043457031, 1020.5469055175781, 611.3717956542969], "score": 0.050642188638448715}, {"image_id": 14, "category_id": 2, "bbox": [687.25048828125, 708.8690795898438, 448.4739990234375, 371.13092041015625], "score": 0.04930110275745392}, {"image_id": 14, "category_id": 2, "bbox": [999.009521484375, 693.3577880859375, 416.6016845703125, 386.6422119140625], "score": 0.04815195873379707}, {"image_id": 14, "category_id": 2, "bbox": [18.563507080078125, 108.55535888671875, 960.6081848144531, 940.0033569335938], "score": 0.04782688245177269}, {"image_id": 14, "category_id": 2, "bbox": [1624.7733154296875, 0.0, 295.2266845703125, 199.58065795898438], "score": 0.04740022495388985}, {"image_id": 14, "category_id": 2, "bbox": [378.3842468261719, 120.2198486328125, 987.8415832519531, 920.9013671875], "score": 0.04701664671301842}, {"image_id": 14, "category_id": 2, "bbox": [85.08332061767578, 721.5658569335938, 543.9235153198242, 358.43414306640625], "score": 0.04672130197286606}, {"image_id": 14, "category_id": 2, "bbox": [945.1261596679688, 273.21746826171875, 361.94708251953125, 586.7745361328125], "score": 0.04666861891746521}, {"image_id": 14, "category_id": 2, "bbox": [193.17672729492188, 0.0, 393.8873596191406, 565.06201171875], "score": 0.04254540055990219}, {"image_id": 14, "category_id": 2, "bbox": [320.6903381347656, 667.8306884765625, 479.6285095214844, 412.1693115234375], "score": 0.04242747649550438}, {"image_id": 14, "category_id": 2, "bbox": [792.2389526367188, 754.1958618164062, 477.33551025390625, 325.80413818359375], "score": 0.042377810925245285}, {"image_id": 14, "category_id": 2, "bbox": [750.3377685546875, 252.14096069335938, 972.341552734375, 827.8590393066406], "score": 0.04229089617729187}, {"image_id": 14, "category_id": 2, "bbox": [1257.797607421875, 484.0981140136719, 343.3253173828125, 595.9018859863281], "score": 0.04136119410395622}, {"image_id": 14, "category_id": 2, 
"bbox": [1008.3387451171875, 331.8180847167969, 382.689453125, 588.5465393066406], "score": 0.04111059382557869}, {"image_id": 14, "category_id": 2, "bbox": [496.7655029296875, 262.53070068359375, 356.901123046875, 611.6620483398438], "score": 0.04096834734082222}, {"image_id": 14, "category_id": 2, "bbox": [1243.4873046875, 247.8162841796875, 366.5089111328125, 658.731689453125], "score": 0.04065350815653801}, {"image_id": 14, "category_id": 2, "bbox": [903.8597412109375, 460.6941833496094, 1016.1402587890625, 619.3058166503906], "score": 0.03980628773570061}, {"image_id": 14, "category_id": 2, "bbox": [491.13720703125, 118.92250061035156, 359.49920654296875, 576.5767059326172], "score": 0.03978777304291725}, {"image_id": 14, "category_id": 2, "bbox": [679.024658203125, 586.88330078125, 1034.9012451171875, 493.11669921875], "score": 0.039567671716213226}, {"image_id": 14, "category_id": 2, "bbox": [196.43408203125, 113.83260345458984, 392.7154541015625, 649.3088760375977], "score": 0.03943919762969017}, {"image_id": 14, "category_id": 2, "bbox": [1129.729248046875, 309.6603088378906, 430.130615234375, 620.6886901855469], "score": 0.03904695808887482}, {"image_id": 14, "category_id": 2, "bbox": [690.9908447265625, 279.6025390625, 438.5875244140625, 574.4407958984375], "score": 0.03892097249627113}, {"image_id": 14, "category_id": 2, "bbox": [307.34954833984375, 0.0, 441.0343017578125, 588.9627685546875], "score": 0.03823527693748474}, {"image_id": 14, "category_id": 2, "bbox": [844.2700805664062, 0.0, 424.97637939453125, 594.5195922851562], "score": 0.03788423910737038}, {"image_id": 14, "category_id": 2, "bbox": [201.5998992919922, 782.4696655273438, 501.6095733642578, 297.53033447265625], "score": 0.03739278018474579}, {"image_id": 14, "category_id": 2, "bbox": [362.0970764160156, 225.2012176513672, 452.3360900878906, 581.1761627197266], "score": 0.03738988935947418}, {"image_id": 14, "category_id": 2, "bbox": [0.0, 466.8345642089844, 1054.98681640625, 613.1654357910156], "score": 0.03728495165705681}, {"image_id": 14, "category_id": 3, "bbox": [930.3250732421875, 504.1339416503906, 343.957275390625, 544.2066345214844], "score": 0.5769980549812317}, {"image_id": 14, "category_id": 3, "bbox": [590.0835571289062, 505.6648864746094, 427.9639892578125, 562.7640686035156], "score": 0.23946282267570496}, {"image_id": 14, "category_id": 3, "bbox": [966.5813598632812, 548.4899291992188, 437.03643798828125, 531.5100708007812], "score": 0.2139546424150467}, {"image_id": 14, "category_id": 3, "bbox": [341.9305114746094, 468.6282043457031, 1020.5469055175781, 611.3717956542969], "score": 0.13921254873275757}, {"image_id": 14, "category_id": 3, "bbox": [579.3218994140625, 122.0561294555664, 300.52093505859375, 574.4690780639648], "score": 0.09972423315048218}, {"image_id": 14, "category_id": 3, "bbox": [927.88330078125, 336.1073913574219, 372.0198974609375, 577.4112243652344], "score": 0.0980660542845726}, {"image_id": 14, "category_id": 3, "bbox": [201.04409790039062, 52.110755920410156, 390.5030212402344, 573.6294784545898], "score": 0.08479738235473633}, {"image_id": 14, "category_id": 3, "bbox": [1257.837646484375, 433.162841796875, 345.557373046875, 587.1565551757812], "score": 0.08248139917850494}, {"image_id": 14, "category_id": 3, "bbox": [499.3105163574219, 188.3776397705078, 347.8390808105469, 548.2425994873047], "score": 0.07403841614723206}, {"image_id": 14, "category_id": 3, "bbox": [538.6162719726562, 250.87184143066406, 979.2365112304688, 829.1281585693359], "score": 
0.0720636397600174}, {"image_id": 14, "category_id": 3, "bbox": [108.2098388671875, 271.77398681640625, 1127.115234375, 808.2260131835938], "score": 0.06969685107469559}, {"image_id": 14, "category_id": 3, "bbox": [846.1294555664062, 388.3124694824219, 406.29180908203125, 595.7688293457031], "score": 0.06508618593215942}, {"image_id": 14, "category_id": 3, "bbox": [983.2190551757812, 396.87994384765625, 402.07037353515625, 577.4811401367188], "score": 0.06361851841211319}, {"image_id": 14, "category_id": 3, "bbox": [579.7796630859375, 286.9581604003906, 322.65435791015625, 658.1825256347656], "score": 0.0627179965376854}, {"image_id": 14, "category_id": 3, "bbox": [834.7819213867188, 621.2877807617188, 428.05438232421875, 458.71221923828125], "score": 0.05990297719836235}, {"image_id": 14, "category_id": 3, "bbox": [925.3731079101562, 656.8285522460938, 310.31463623046875, 278.44659423828125], "score": 0.057866018265485764}, {"image_id": 14, "category_id": 3, "bbox": [340.5570068359375, 829.281982421875, 313.84600830078125, 250.718017578125], "score": 0.05583702027797699}, {"image_id": 14, "category_id": 3, "bbox": [460.0724792480469, 504.81268310546875, 464.7417907714844, 567.0371704101562], "score": 0.055171892046928406}, {"image_id": 14, "category_id": 3, "bbox": [0.0, 466.8345642089844, 1054.98681640625, 613.1654357910156], "score": 0.05316987633705139}, {"image_id": 14, "category_id": 3, "bbox": [1679.528076171875, 0.0, 240.471923828125, 213.98570251464844], "score": 0.0503186471760273}, {"image_id": 14, "category_id": 3, "bbox": [1315.828857421875, 301.5069274902344, 332.4617919921875, 636.8389587402344], "score": 0.04874228313565254}, {"image_id": 14, "category_id": 3, "bbox": [606.1704711914062, 187.20294189453125, 399.9027099609375, 550.1322021484375], "score": 0.04873416945338249}, {"image_id": 14, "category_id": 3, "bbox": [924.0281982421875, 550.0474853515625, 308.3609619140625, 273.3729248046875], "score": 0.048175521194934845}, {"image_id": 14, "category_id": 3, "bbox": [965.6502685546875, 736.6380615234375, 320.085205078125, 281.648193359375], "score": 0.046629950404167175}, {"image_id": 14, "category_id": 3, "bbox": [18.563507080078125, 108.55535888671875, 960.6081848144531, 940.0033569335938], "score": 0.04647887870669365}, {"image_id": 14, "category_id": 3, "bbox": [595.3455200195312, 275.3703918457031, 312.77435302734375, 280.1887512207031], "score": 0.04546533524990082}, {"image_id": 14, "category_id": 3, "bbox": [972.405029296875, 609.522705078125, 314.2618408203125, 267.0030517578125], "score": 0.0435611829161644}, {"image_id": 14, "category_id": 3, "bbox": [964.7305297851562, 826.556396484375, 318.08966064453125, 253.443603515625], "score": 0.04225294291973114}, {"image_id": 14, "category_id": 3, "bbox": [721.024169921875, 793.4166870117188, 328.5772705078125, 278.67755126953125], "score": 0.04055197909474373}, {"image_id": 14, "category_id": 3, "bbox": [495.6296081542969, 278.9726257324219, 317.7154235839844, 273.4951477050781], "score": 0.03966778144240379}, {"image_id": 14, "category_id": 3, "bbox": [315.84521484375, 35.60888671875, 1090.3138427734375, 970.8032836914062], "score": 0.03910239785909653}, {"image_id": 14, "category_id": 3, "bbox": [583.237548828125, 546.8082275390625, 317.163818359375, 280.76971435546875], "score": 0.03886453062295914}, {"image_id": 14, "category_id": 3, "bbox": [864.9913330078125, 714.0511474609375, 328.90869140625, 273.5772705078125], "score": 0.03836856037378311}, {"image_id": 14, "category_id": 3, "bbox": [870.7115478515625, 
611.182861328125, 323.6715087890625, 267.39202880859375], "score": 0.038327332586050034}, {"image_id": 14, "category_id": 3, "bbox": [826.103271484375, 560.5193481445312, 316.136474609375, 252.6168212890625], "score": 0.038045525550842285}, {"image_id": 14, "category_id": 3, "bbox": [823.4773559570312, 666.86767578125, 321.50262451171875, 261.1644287109375], "score": 0.03769077733159065}, {"image_id": 14, "category_id": 3, "bbox": [687.3016357421875, 400.66583251953125, 445.09375, 572.3491821289062], "score": 0.03721703216433525}, {"image_id": 15, "category_id": 1, "bbox": [285.50555419921875, 403.04351806640625, 311.56280517578125, 316.95648193359375], "score": 0.24857108294963837}, {"image_id": 15, "category_id": 1, "bbox": [529.3870849609375, 331.2480163574219, 268.50189208984375, 378.3884582519531], "score": 0.18696080148220062}, {"image_id": 15, "category_id": 1, "bbox": [330.2179260253906, 473.3468017578125, 313.3990173339844, 246.6531982421875], "score": 0.1765640527009964}, {"image_id": 15, "category_id": 1, "bbox": [300.55072021484375, 62.338348388671875, 547.6325073242188, 639.2954406738281], "score": 0.16862550377845764}, {"image_id": 15, "category_id": 1, "bbox": [1016.231689453125, 0.0, 247.6005859375, 409.560791015625], "score": 0.159682035446167}, {"image_id": 15, "category_id": 1, "bbox": [902.0829467773438, 237.55178833007812, 245.85150146484375, 418.2108459472656], "score": 0.11155053228139877}, {"image_id": 15, "category_id": 1, "bbox": [326.7255554199219, 313.7384948730469, 331.7193298339844, 406.2615051269531], "score": 0.09946274012327194}, {"image_id": 15, "category_id": 1, "bbox": [274.7166442871094, 295.96795654296875, 588.0730285644531, 424.03204345703125], "score": 0.08839057385921478}, {"image_id": 15, "category_id": 1, "bbox": [395.2164001464844, 406.2527160644531, 314.6112976074219, 313.7472839355469], "score": 0.08721306174993515}, {"image_id": 15, "category_id": 1, "bbox": [88.44869995117188, 11.178245544433594, 710.3774719238281, 641.6105117797852], "score": 0.07784419506788254}, {"image_id": 15, "category_id": 1, "bbox": [884.0531616210938, 152.98733520507812, 254.31829833984375, 379.1787414550781], "score": 0.07197190076112747}, {"image_id": 15, "category_id": 1, "bbox": [0.0, 89.50159454345703, 661.6312866210938, 612.9911422729492], "score": 0.06661410629749298}, {"image_id": 15, "category_id": 1, "bbox": [543.6611328125, 223.93408203125, 255.55487060546875, 372.31805419921875], "score": 0.06459765136241913}, {"image_id": 15, "category_id": 1, "bbox": [2.2493133544921875, 250.79620361328125, 263.9267120361328, 400.81903076171875], "score": 0.0627378523349762}, {"image_id": 15, "category_id": 1, "bbox": [582.8905029296875, 356.8600769042969, 283.24029541015625, 363.1399230957031], "score": 0.05682627111673355}, {"image_id": 15, "category_id": 1, "bbox": [471.23016357421875, 432.6015625, 307.1199951171875, 287.3984375], "score": 0.0538707971572876}, {"image_id": 15, "category_id": 1, "bbox": [476.34197998046875, 105.54218292236328, 692.2787475585938, 593.5269088745117], "score": 0.052256807684898376}, {"image_id": 15, "category_id": 1, "bbox": [1049.05517578125, 79.83586120605469, 230.94482421875, 392.7246856689453], "score": 0.04855263978242874}, {"image_id": 15, "category_id": 1, "bbox": [119.428466796875, 396.5010070800781, 671.7646484375, 323.4989929199219], "score": 0.04277246445417404}, {"image_id": 15, "category_id": 1, "bbox": [0.0, 242.35067749023438, 526.080322265625, 477.6493225097656], "score": 0.04064059630036354}, {"image_id": 15, 
"category_id": 1, "bbox": [0.0, 94.27944946289062, 356.51171875, 606.9027404785156], "score": 0.04024088755249977}, {"image_id": 15, "category_id": 1, "bbox": [1039.08544921875, 0.0, 240.91455078125, 282.1475524902344], "score": 0.03929464519023895}, {"image_id": 15, "category_id": 2, "bbox": [533.130126953125, 300.8678283691406, 262.27130126953125, 389.0122985839844], "score": 0.27847743034362793}, {"image_id": 15, "category_id": 2, "bbox": [576.6292724609375, 333.5790710449219, 293.77044677734375, 376.2446594238281], "score": 0.1917218565940857}, {"image_id": 15, "category_id": 2, "bbox": [388.874267578125, 323.3922424316406, 347.72802734375, 396.6077575683594], "score": 0.14392583072185516}, {"image_id": 15, "category_id": 2, "bbox": [494.3774719238281, 223.1995086669922, 287.6388854980469, 377.02577209472656], "score": 0.1180533692240715}, {"image_id": 15, "category_id": 2, "bbox": [589.009521484375, 228.56199645996094, 272.38641357421875, 365.19178771972656], "score": 0.10535042732954025}, {"image_id": 15, "category_id": 2, "bbox": [471.23016357421875, 432.6015625, 307.1199951171875, 287.3984375], "score": 0.09029842168092728}, {"image_id": 15, "category_id": 2, "bbox": [404.2845458984375, 447.97967529296875, 282.180419921875, 272.02032470703125], "score": 0.09017234295606613}, {"image_id": 15, "category_id": 2, "bbox": [332.267578125, 288.4296875, 321.8419189453125, 411.88128662109375], "score": 0.08083277195692062}, {"image_id": 15, "category_id": 2, "bbox": [652.8941040039062, 298.2981262207031, 293.82122802734375, 388.6935729980469], "score": 0.07637148350477219}, {"image_id": 15, "category_id": 2, "bbox": [292.68060302734375, 444.5769958496094, 304.04656982421875, 275.4230041503906], "score": 0.07260096818208694}, {"image_id": 15, "category_id": 2, "bbox": [300.55072021484375, 62.338348388671875, 547.6325073242188, 639.2954406738281], "score": 0.06843706220388412}, {"image_id": 15, "category_id": 2, "bbox": [156.8812713623047, 125.00367736816406, 292.0859832763672, 358.34446716308594], "score": 0.05948814004659653}, {"image_id": 15, "category_id": 2, "bbox": [88.220458984375, 88.5527572631836, 307.1129150390625, 362.0991897583008], "score": 0.05637764930725098}, {"image_id": 15, "category_id": 2, "bbox": [717.5806884765625, 321.3255920410156, 294.80908203125, 398.6744079589844], "score": 0.054824262857437134}, {"image_id": 15, "category_id": 2, "bbox": [588.0869750976562, 461.0860900878906, 311.488525390625, 258.9139099121094], "score": 0.05452142655849457}, {"image_id": 15, "category_id": 2, "bbox": [239.07766723632812, 94.26924896240234, 247.19424438476562, 354.0800552368164], "score": 0.052373941987752914}, {"image_id": 15, "category_id": 2, "bbox": [0.0, 89.50159454345703, 661.6312866210938, 612.9911422729492], "score": 0.051411863416433334}, {"image_id": 15, "category_id": 2, "bbox": [186.85540771484375, 56.892459869384766, 246.999755859375, 358.0553550720215], "score": 0.05023963749408722}, {"image_id": 15, "category_id": 2, "bbox": [281.9388122558594, 251.64102172851562, 307.5742492675781, 408.5929260253906], "score": 0.050086118280887604}, {"image_id": 15, "category_id": 2, "bbox": [223.4081268310547, 160.33311462402344, 282.22254943847656, 362.90943908691406], "score": 0.0476297065615654}, {"image_id": 15, "category_id": 2, "bbox": [658.0736083984375, 187.49932861328125, 292.34136962890625, 380.33837890625], "score": 0.04726174473762512}, {"image_id": 15, "category_id": 2, "bbox": [133.64352416992188, 305.9339294433594, 668.2834167480469, 414.0660705566406], "score": 
0.046316999942064285}, {"image_id": 15, "category_id": 2, "bbox": [549.0213012695312, 148.6397705078125, 272.9393310546875, 391.9180908203125], "score": 0.04552553594112396}, {"image_id": 15, "category_id": 2, "bbox": [236.13111877441406, 317.9824523925781, 300.5738983154297, 402.0175476074219], "score": 0.045115090906620026}, {"image_id": 15, "category_id": 2, "bbox": [281.77911376953125, 123.06632232666016, 282.0045166015625, 365.99913787841797], "score": 0.0446002371609211}, {"image_id": 15, "category_id": 2, "bbox": [188.17013549804688, 0.0, 235.26116943359375, 310.4591979980469], "score": 0.04400898143649101}, {"image_id": 15, "category_id": 2, "bbox": [808.13916015625, 240.36045837402344, 299.2144775390625, 414.89332580566406], "score": 0.0436427928507328}, {"image_id": 15, "category_id": 2, "bbox": [581.2481689453125, 327.87652587890625, 210.89117431640625, 191.46270751953125], "score": 0.043601009994745255}, {"image_id": 15, "category_id": 2, "bbox": [697.707763671875, 474.8794860839844, 350.3204345703125, 245.12051391601562], "score": 0.04255115985870361}, {"image_id": 15, "category_id": 2, "bbox": [628.78515625, 518.4216918945312, 347.82470703125, 201.57830810546875], "score": 0.04239877685904503}, {"image_id": 15, "category_id": 2, "bbox": [88.44869995117188, 11.178245544433594, 710.3774719238281, 641.6105117797852], "score": 0.042146604508161545}, {"image_id": 15, "category_id": 2, "bbox": [578.297119140625, 383.3424072265625, 212.83056640625, 187.80230712890625], "score": 0.04184352606534958}, {"image_id": 15, "category_id": 2, "bbox": [722.5233154296875, 221.18222045898438, 297.9034423828125, 382.0157775878906], "score": 0.04174608737230301}, {"image_id": 15, "category_id": 2, "bbox": [450.6881408691406, 169.71717834472656, 710.1978454589844, 550.2828216552734], "score": 0.04142868518829346}, {"image_id": 15, "category_id": 2, "bbox": [837.1141357421875, 439.5083923339844, 345.6273193359375, 280.4916076660156], "score": 0.04138702526688576}, {"image_id": 15, "category_id": 2, "bbox": [415.9914245605469, 186.33502197265625, 295.5080871582031, 386.29736328125], "score": 0.04109892249107361}, {"image_id": 15, "category_id": 2, "bbox": [130.5014190673828, 0.0, 334.88255310058594, 236.37884521484375], "score": 0.04076342657208443}, {"image_id": 15, "category_id": 2, "bbox": [775.22509765625, 402.4207458496094, 318.502685546875, 317.5792541503906], "score": 0.040511343628168106}, {"image_id": 15, "category_id": 2, "bbox": [612.2684326171875, 103.06940460205078, 277.25128173828125, 409.1266403198242], "score": 0.03998826444149017}, {"image_id": 15, "category_id": 2, "bbox": [356.3211669921875, 191.79534912109375, 274.85418701171875, 377.2906494140625], "score": 0.03985557705163956}, {"image_id": 15, "category_id": 2, "bbox": [96.1666488647461, 0.0, 295.0486831665039, 344.1278381347656], "score": 0.03967832028865814}, {"image_id": 15, "category_id": 2, "bbox": [312.48681640625, 297.75677490234375, 685.5972290039062, 422.24322509765625], "score": 0.03920989856123924}, {"image_id": 15, "category_id": 3, "bbox": [529.3870849609375, 331.2480163574219, 268.50189208984375, 378.3884582519531], "score": 0.691390872001648}, {"image_id": 15, "category_id": 3, "bbox": [543.6611328125, 223.93408203125, 255.55487060546875, 372.31805419921875], "score": 0.34184443950653076}, {"image_id": 15, "category_id": 3, "bbox": [582.2289428710938, 303.04681396484375, 282.8043212890625, 388.683837890625], "score": 0.2516213357448578}, {"image_id": 15, "category_id": 3, "bbox": [471.23016357421875, 
432.6015625, 307.1199951171875, 287.3984375], "score": 0.14880871772766113}, {"image_id": 15, "category_id": 3, "bbox": [388.874267578125, 323.3922424316406, 347.72802734375, 396.6077575683594], "score": 0.14669276773929596}, {"image_id": 15, "category_id": 3, "bbox": [272.8881530761719, 181.3797149658203, 582.0373840332031, 538.6202850341797], "score": 0.13475894927978516}, {"image_id": 15, "category_id": 3, "bbox": [326.65277099609375, 404.7344970703125, 315.369140625, 315.2655029296875], "score": 0.11447262763977051}, {"image_id": 15, "category_id": 3, "bbox": [404.2845458984375, 447.97967529296875, 282.180419921875, 272.02032470703125], "score": 0.0965421199798584}, {"image_id": 15, "category_id": 3, "bbox": [902.0829467773438, 237.55178833007812, 245.85150146484375, 418.2108459472656], "score": 0.08551351726055145}, {"image_id": 15, "category_id": 3, "bbox": [133.64352416992188, 305.9339294433594, 668.2834167480469, 414.0660705566406], "score": 0.07901442795991898}, {"image_id": 15, "category_id": 3, "bbox": [658.291748046875, 351.51617431640625, 286.849365234375, 368.48382568359375], "score": 0.07202675193548203}, {"image_id": 15, "category_id": 3, "bbox": [0.0, 243.88796997070312, 668.7788696289062, 476.1120300292969], "score": 0.07018254697322845}, {"image_id": 15, "category_id": 3, "bbox": [88.44869995117188, 11.178245544433594, 710.3774719238281, 641.6105117797852], "score": 0.06777668744325638}, {"image_id": 15, "category_id": 3, "bbox": [581.62646484375, 345.6606750488281, 208.76287841796875, 189.74411010742188], "score": 0.06776521354913712}, {"image_id": 15, "category_id": 3, "bbox": [599.4840698242188, 189.51576232910156, 271.04534912109375, 376.0597381591797], "score": 0.06774213910102844}, {"image_id": 15, "category_id": 3, "bbox": [371.8294982910156, 71.2218246459961, 601.5850524902344, 623.6659927368164], "score": 0.06741147488355637}, {"image_id": 15, "category_id": 3, "bbox": [261.2214050292969, 390.9271240234375, 606.9401550292969, 329.0728759765625], "score": 0.06676535308361053}, {"image_id": 15, "category_id": 3, "bbox": [590.0900268554688, 425.8714599609375, 289.9569091796875, 294.1285400390625], "score": 0.06618952006101608}, {"image_id": 15, "category_id": 3, "bbox": [548.5706176757812, 436.0364685058594, 210.28363037109375, 188.82302856445312], "score": 0.06031597778201103}, {"image_id": 15, "category_id": 3, "bbox": [297.5416564941406, 488.2881164550781, 211.805908203125, 195.02822875976562], "score": 0.05600598454475403}, {"image_id": 15, "category_id": 3, "bbox": [450.6881408691406, 169.71717834472656, 710.1978454589844, 550.2828216552734], "score": 0.054908156394958496}, {"image_id": 15, "category_id": 3, "bbox": [609.6263427734375, 382.98358154296875, 212.8228759765625, 188.7855224609375], "score": 0.05300630256533623}, {"image_id": 15, "category_id": 3, "bbox": [646.0736694335938, 349.1755065917969, 209.53472900390625, 182.09225463867188], "score": 0.05069073662161827}, {"image_id": 15, "category_id": 3, "bbox": [253.0294952392578, 405.9132385253906, 288.8655242919922, 314.0867614746094], "score": 0.04960871487855911}, {"image_id": 15, "category_id": 3, "bbox": [547.6046142578125, 385.67083740234375, 213.90625, 182.14556884765625], "score": 0.04802967980504036}, {"image_id": 15, "category_id": 3, "bbox": [1016.231689453125, 0.0, 247.6005859375, 409.560791015625], "score": 0.04603387787938118}, {"image_id": 15, "category_id": 3, "bbox": [616.399169921875, 314.522705078125, 208.34088134765625, 180.7152099609375], "score": 0.0453033447265625}, {"image_id": 
15, "category_id": 3, "bbox": [884.0531616210938, 152.98733520507812, 254.31829833984375, 379.1787414550781], "score": 0.0446392297744751}, {"image_id": 15, "category_id": 3, "bbox": [355.94635009765625, 476.0451354980469, 218.97894287109375, 184.74429321289062], "score": 0.042012374848127365}, {"image_id": 15, "category_id": 3, "bbox": [292.037109375, 543.4918823242188, 215.54733276367188, 176.50811767578125], "score": 0.04176023229956627}, {"image_id": 15, "category_id": 3, "bbox": [512.984130859375, 477.1334228515625, 216.2867431640625, 178.5518798828125], "score": 0.04073350131511688}, {"image_id": 15, "category_id": 3, "bbox": [484.6168518066406, 440.55999755859375, 214.32101440429688, 180.62860107421875], "score": 0.04067563638091087}, {"image_id": 15, "category_id": 3, "bbox": [546.8716430664062, 547.8382568359375, 214.717041015625, 172.1617431640625], "score": 0.04046627879142761}, {"image_id": 15, "category_id": 3, "bbox": [430.7252197265625, 405.0920104980469, 761.9312744140625, 314.9079895019531], "score": 0.03909771516919136}, {"image_id": 15, "category_id": 3, "bbox": [517.6163940429688, 351.2518615722656, 215.26751708984375, 181.76583862304688], "score": 0.03849874436855316}, {"image_id": 15, "category_id": 3, "bbox": [449.2218933105469, 477.04681396484375, 216.10354614257812, 180.36517333984375], "score": 0.038385454565286636}, {"image_id": 16, "category_id": 1, "bbox": [336.86309814453125, 275.3941955566406, 182.01409912109375, 358.0639953613281], "score": 0.4532265067100525}, {"image_id": 16, "category_id": 1, "bbox": [356.43994140625, 271.8867492675781, 248.68359375, 357.7560729980469], "score": 0.28949305415153503}, {"image_id": 16, "category_id": 1, "bbox": [936.4114990234375, 169.1969757080078, 227.7354736328125, 426.5525360107422], "score": 0.11135156452655792}, {"image_id": 16, "category_id": 1, "bbox": [401.84368896484375, 414.6866455078125, 327.62359619140625, 305.3133544921875], "score": 0.059448059648275375}, {"image_id": 16, "category_id": 1, "bbox": [17.5694580078125, 14.513385772705078, 607.109375, 625.4094657897949], "score": 0.05567023158073425}, {"image_id": 16, "category_id": 1, "bbox": [271.7882080078125, 221.3467559814453, 235.91180419921875, 386.4899139404297], "score": 0.04918549209833145}, {"image_id": 16, "category_id": 1, "bbox": [684.7286376953125, 181.5120086669922, 533.11328125, 538.4879913330078], "score": 0.04656573012471199}, {"image_id": 16, "category_id": 1, "bbox": [909.707763671875, 265.7884826660156, 220.536865234375, 415.7674865722656], "score": 0.04563073813915253}, {"image_id": 16, "category_id": 1, "bbox": [878.600830078125, 152.4204559326172, 252.5316162109375, 396.1447296142578], "score": 0.0425698384642601}, {"image_id": 16, "category_id": 1, "bbox": [399.34454345703125, 222.5753631591797, 282.8597412109375, 391.6552276611328], "score": 0.04237351939082146}, {"image_id": 16, "category_id": 1, "bbox": [551.972900390625, 35.72273254394531, 630.121337890625, 594.5267791748047], "score": 0.041576605290174484}, {"image_id": 16, "category_id": 1, "bbox": [202.335205078125, 21.12993621826172, 716.3572387695312, 614.203254699707], "score": 0.04029516130685806}, {"image_id": 16, "category_id": 1, "bbox": [800.4441528320312, 163.47369384765625, 297.21282958984375, 427.63970947265625], "score": 0.03942584618926048}, {"image_id": 16, "category_id": 1, "bbox": [0.0, 276.3784484863281, 270.8262939453125, 412.7174377441406], "score": 0.0393291711807251}, {"image_id": 16, "category_id": 1, "bbox": [1002.0093383789062, 175.5413055419922, 
262.16839599609375, 414.1721954345703], "score": 0.03663625568151474}, {"image_id": 16, "category_id": 2, "bbox": [457.9722900390625, 417.00262451171875, 279.20989990234375, 302.99737548828125], "score": 0.6361801624298096}, {"image_id": 16, "category_id": 2, "bbox": [365.787109375, 414.2658386230469, 309.29541015625, 305.7341613769531], "score": 0.5591358542442322}, {"image_id": 16, "category_id": 2, "bbox": [395.73638916015625, 335.14208984375, 337.08685302734375, 364.206787109375], "score": 0.46016815304756165}, {"image_id": 16, "category_id": 2, "bbox": [535.4012451171875, 389.65155029296875, 301.7913818359375, 330.34844970703125], "score": 0.1609506607055664}, {"image_id": 16, "category_id": 2, "bbox": [479.29925537109375, 249.29598999023438, 276.1229248046875, 470.4782409667969], "score": 0.09776066243648529}, {"image_id": 16, "category_id": 2, "bbox": [930.8110961914062, 231.670166015625, 230.34393310546875, 416.19744873046875], "score": 0.08111707866191864}, {"image_id": 16, "category_id": 2, "bbox": [653.4285888671875, 397.59796142578125, 317.6807861328125, 322.40203857421875], "score": 0.07645121961832047}, {"image_id": 16, "category_id": 2, "bbox": [319.212646484375, 324.60308837890625, 248.33447265625, 395.39691162109375], "score": 0.07492896169424057}, {"image_id": 16, "category_id": 2, "bbox": [725.1385498046875, 362.1277770996094, 305.6890869140625, 357.8722229003906], "score": 0.07194596529006958}, {"image_id": 16, "category_id": 2, "bbox": [972.4703369140625, 194.65634155273438, 293.7115478515625, 428.5024719238281], "score": 0.06767643243074417}, {"image_id": 16, "category_id": 2, "bbox": [794.990234375, 227.47120666503906, 305.00390625, 428.9591522216797], "score": 0.06695469468832016}, {"image_id": 16, "category_id": 2, "bbox": [591.7352294921875, 324.1864318847656, 296.2667236328125, 395.2792053222656], "score": 0.06472697108983994}, {"image_id": 16, "category_id": 2, "bbox": [906.8056640625, 312.38580322265625, 216.708740234375, 395.55108642578125], "score": 0.06467036157846451}, {"image_id": 16, "category_id": 2, "bbox": [538.7858276367188, 254.57003784179688, 271.28021240234375, 393.1874084472656], "score": 0.06426616758108139}, {"image_id": 16, "category_id": 2, "bbox": [883.3484497070312, 168.810302734375, 255.81048583984375, 426.17364501953125], "score": 0.0638531967997551}, {"image_id": 16, "category_id": 2, "bbox": [336.86309814453125, 275.3941955566406, 182.01409912109375, 358.0639953613281], "score": 0.06283324956893921}, {"image_id": 16, "category_id": 2, "bbox": [594.1566772460938, 469.7136535644531, 318.9403076171875, 250.28634643554688], "score": 0.06036137044429779}, {"image_id": 16, "category_id": 2, "bbox": [745.5154418945312, 201.99488830566406, 288.24151611328125, 416.2689666748047], "score": 0.057865239679813385}, {"image_id": 16, "category_id": 2, "bbox": [220.65390014648438, 392.3861389160156, 310.2367858886719, 327.6138610839844], "score": 0.055964693427085876}, {"image_id": 16, "category_id": 2, "bbox": [781.6707763671875, 427.7825927734375, 322.0291748046875, 292.2174072265625], "score": 0.05590575933456421}, {"image_id": 16, "category_id": 2, "bbox": [356.43994140625, 271.8867492675781, 248.68359375, 357.7560729980469], "score": 0.05395525321364403}, {"image_id": 16, "category_id": 2, "bbox": [675.7922973632812, 251.88365173339844, 282.22198486328125, 389.66627502441406], "score": 0.052868567407131195}, {"image_id": 16, "category_id": 2, "bbox": [281.220703125, 462.01190185546875, 308.0623779296875, 257.98809814453125], "score": 
0.05084463953971863}, {"image_id": 16, "category_id": 2, "bbox": [703.742431640625, 511.1883850097656, 348.201904296875, 208.81161499023438], "score": 0.04826437681913376}, {"image_id": 16, "category_id": 2, "bbox": [1020.9027099609375, 132.7636260986328, 233.7257080078125, 369.71937561035156], "score": 0.04783732816576958}, {"image_id": 16, "category_id": 2, "bbox": [519.8169555664062, 418.05242919921875, 209.25189208984375, 189.924072265625], "score": 0.04720707982778549}, {"image_id": 16, "category_id": 2, "bbox": [399.34454345703125, 222.5753631591797, 282.8597412109375, 391.6552276611328], "score": 0.047044944018125534}, {"image_id": 16, "category_id": 2, "bbox": [471.11236572265625, 185.4452667236328, 282.96826171875, 392.2163543701172], "score": 0.04663538932800293}, {"image_id": 16, "category_id": 2, "bbox": [315.0273132324219, 108.50296783447266, 233.92532348632812, 404.08748626708984], "score": 0.04635488614439964}, {"image_id": 16, "category_id": 2, "bbox": [447.81396484375, 527.0492553710938, 219.83251953125, 188.904052734375], "score": 0.04597407206892967}, {"image_id": 16, "category_id": 2, "bbox": [352.0836181640625, 131.30615234375, 255.244384765625, 431.55401611328125], "score": 0.04441070556640625}, {"image_id": 16, "category_id": 2, "bbox": [944.4085083007812, 129.99960327148438, 221.58831787109375, 371.6004638671875], "score": 0.043791983276605606}, {"image_id": 16, "category_id": 2, "bbox": [720.2982788085938, 0.0, 316.81329345703125, 316.8418273925781], "score": 0.04349201172590256}, {"image_id": 16, "category_id": 2, "bbox": [612.4384765625, 218.89808654785156, 267.19140625, 388.9846649169922], "score": 0.042292434722185135}, {"image_id": 16, "category_id": 2, "bbox": [386.8273010253906, 521.62890625, 213.69137573242188, 198.37109375], "score": 0.042204421013593674}, {"image_id": 16, "category_id": 2, "bbox": [66.03973388671875, 0.0, 347.2804260253906, 243.72512817382812], "score": 0.03963669762015343}, {"image_id": 16, "category_id": 2, "bbox": [261.1946105957031, 175.518310546875, 249.30990600585938, 404.78521728515625], "score": 0.039316728711128235}, {"image_id": 16, "category_id": 2, "bbox": [802.9979248046875, 114.15443420410156, 288.147705078125, 398.7086639404297], "score": 0.039073869585990906}, {"image_id": 16, "category_id": 2, "bbox": [105.81658935546875, 0.0, 692.734619140625, 328.31744384765625], "score": 0.0390596017241478}, {"image_id": 16, "category_id": 2, "bbox": [622.899169921875, 0.0, 254.1527099609375, 319.5589599609375], "score": 0.03903106600046158}, {"image_id": 16, "category_id": 2, "bbox": [735.050537109375, 105.27195739746094, 285.89306640625, 408.73158264160156], "score": 0.038333311676979065}, {"image_id": 16, "category_id": 2, "bbox": [849.8811645507812, 467.32244873046875, 336.16314697265625, 252.67755126953125], "score": 0.0382651761174202}, {"image_id": 16, "category_id": 2, "bbox": [765.616943359375, 0.0, 344.47607421875, 234.67825317382812], "score": 0.0377642884850502}, {"image_id": 16, "category_id": 2, "bbox": [1037.49853515625, 263.35308837890625, 242.50146484375, 416.92718505859375], "score": 0.03745715320110321}, {"image_id": 16, "category_id": 2, "bbox": [137.5826416015625, 432.8229675292969, 329.37445068359375, 287.1770324707031], "score": 0.03737988695502281}, {"image_id": 16, "category_id": 2, "bbox": [418.1748046875, 491.5243225097656, 220.2408447265625, 186.64382934570312], "score": 0.03712930157780647}, {"image_id": 16, "category_id": 2, "bbox": [648.9322509765625, 0.0, 309.8328857421875, 237.80026245117188], "score": 
0.036454275250434875}, {"image_id": 16, "category_id": 2, "bbox": [565.7615966796875, 0.0, 610.9827880859375, 572.5966186523438], "score": 0.03645290434360504}, {"image_id": 16, "category_id": 2, "bbox": [227.05703735351562, 111.88555145263672, 265.5568542480469, 383.0737075805664], "score": 0.03602749481797218}, {"image_id": 16, "category_id": 2, "bbox": [706.03515625, 0.0, 344.3853759765625, 193.7202606201172], "score": 0.035602834075689316}, {"image_id": 16, "category_id": 3, "bbox": [405.9984436035156, 378.3787536621094, 320.9314270019531, 341.6212463378906], "score": 0.4211910665035248}, {"image_id": 16, "category_id": 3, "bbox": [930.8110961914062, 231.670166015625, 230.34393310546875, 416.19744873046875], "score": 0.1669294536113739}, {"image_id": 16, "category_id": 3, "bbox": [335.861083984375, 232.75155639648438, 181.66949462890625, 371.7162170410156], "score": 0.09342826902866364}, {"image_id": 16, "category_id": 3, "bbox": [883.3484497070312, 168.810302734375, 255.81048583984375, 426.17364501953125], "score": 0.07997912168502808}, {"image_id": 16, "category_id": 3, "bbox": [386.8273010253906, 521.62890625, 213.69137573242188, 198.37109375], "score": 0.07257293909788132}, {"image_id": 16, "category_id": 3, "bbox": [359.4886169433594, 299.79559326171875, 249.65646362304688, 369.82379150390625], "score": 0.07198755443096161}, {"image_id": 16, "category_id": 3, "bbox": [906.8056640625, 312.38580322265625, 216.708740234375, 395.55108642578125], "score": 0.0667639672756195}, {"image_id": 16, "category_id": 3, "bbox": [963.1400146484375, 232.74038696289062, 299.8017578125, 412.2821350097656], "score": 0.06576235592365265}, {"image_id": 16, "category_id": 3, "bbox": [519.0089111328125, 437.6493835449219, 207.91436767578125, 186.29440307617188], "score": 0.060757625848054886}, {"image_id": 16, "category_id": 3, "bbox": [348.8314514160156, 187.60841369628906, 265.8408508300781, 389.27403259277344], "score": 0.0589279979467392}, {"image_id": 16, "category_id": 3, "bbox": [320.99945068359375, 526.6847534179688, 217.35028076171875, 189.72161865234375], "score": 0.05319904908537865}, {"image_id": 16, "category_id": 3, "bbox": [447.81396484375, 527.0492553710938, 219.83251953125, 188.904052734375], "score": 0.04935244470834732}, {"image_id": 16, "category_id": 3, "bbox": [425.46832275390625, 472.29229736328125, 213.6357421875, 188.88031005859375], "score": 0.04873520880937576}, {"image_id": 16, "category_id": 3, "bbox": [794.990234375, 227.47120666503906, 305.00390625, 428.9591522216797], "score": 0.04835868999361992}, {"image_id": 16, "category_id": 3, "bbox": [1016.131103515625, 160.37937927246094, 246.8511962890625, 383.92970275878906], "score": 0.04770198091864586}, {"image_id": 16, "category_id": 3, "bbox": [331.1636657714844, 312.9640197753906, 201.40267944335938, 407.0359802246094], "score": 0.04739760607481003}, {"image_id": 16, "category_id": 3, "bbox": [357.3717041015625, 492.2439880371094, 214.56048583984375, 186.74252319335938], "score": 0.04739305004477501}, {"image_id": 16, "category_id": 3, "bbox": [450.6856689453125, 420.5836181640625, 215.4737548828125, 184.08184814453125], "score": 0.046001143753528595}, {"image_id": 16, "category_id": 3, "bbox": [583.8054809570312, 440.249267578125, 213.63873291015625, 182.322021484375], "score": 0.04221324995160103}, {"image_id": 16, "category_id": 3, "bbox": [548.8372802734375, 472.8526306152344, 212.05859375, 189.03469848632812], "score": 0.0420689657330513}, {"image_id": 16, "category_id": 3, "bbox": [365.4671936035156, 
313.1797790527344, 210.34573364257812, 180.0218505859375], "score": 0.041613269597291946}, {"image_id": 16, "category_id": 3, "bbox": [899.520751953125, 332.0851135253906, 212.734619140625, 178.5465087890625], "score": 0.04136278107762337}, {"image_id": 16, "category_id": 3, "bbox": [965.2262573242188, 332.80938720703125, 211.18475341796875, 177.24432373046875], "score": 0.04110611975193024}, {"image_id": 16, "category_id": 3, "bbox": [835.3028564453125, 335.9264831542969, 213.0726318359375, 173.02999877929688], "score": 0.04023773968219757}, {"image_id": 16, "category_id": 3, "bbox": [483.7598876953125, 475.65753173828125, 217.28717041015625, 182.59088134765625], "score": 0.04016151651740074}, {"image_id": 16, "category_id": 3, "bbox": [411.62158203125, 584.3528442382812, 223.809814453125, 135.64715576171875], "score": 0.038827426731586456}, {"image_id": 16, "category_id": 3, "bbox": [387.5108642578125, 459.0054626464844, 213.3004150390625, 179.81729125976562], "score": 0.03873821347951889}, {"image_id": 16, "category_id": 3, "bbox": [930.19189453125, 297.8472595214844, 215.140380859375, 176.24334716796875], "score": 0.03796377032995224}, {"image_id": 16, "category_id": 3, "bbox": [297.7579650878906, 314.12872314453125, 211.72988891601562, 179.17938232421875], "score": 0.03742797300219536}, {"image_id": 16, "category_id": 3, "bbox": [350.2870788574219, 586.2660522460938, 221.14724731445312, 133.73394775390625], "score": 0.037316616624593735}, {"image_id": 16, "category_id": 3, "bbox": [550.9598388671875, 406.3299865722656, 209.955810546875, 178.20431518554688], "score": 0.036985576152801514}, {"image_id": 16, "category_id": 3, "bbox": [995.4751586914062, 302.53485107421875, 216.07415771484375, 165.53372192382812], "score": 0.03689828887581825}, {"image_id": 16, "category_id": 3, "bbox": [256.67205810546875, 549.798583984375, 217.94400024414062, 170.201416015625], "score": 0.036475714296102524}, {"image_id": 16, "category_id": 3, "bbox": [356.695068359375, 421.8201904296875, 214.12701416015625, 180.73980712890625], "score": 0.035710178315639496}, {"image_id": 16, "category_id": 3, "bbox": [321.9405517578125, 459.6435852050781, 215.26397705078125, 178.55661010742188], "score": 0.03566273674368858}, {"image_id": 17, "category_id": 1, "bbox": [639.969970703125, 161.40650939941406, 212.0770263671875, 366.4171600341797], "score": 0.7157062292098999}, {"image_id": 17, "category_id": 1, "bbox": [713.7442016601562, 0.0, 191.927734375, 304.2700500488281], "score": 0.20574510097503662}, {"image_id": 17, "category_id": 1, "bbox": [703.7232666015625, 24.22433853149414, 208.80230712890625, 431.1375389099121], "score": 0.15677635371685028}, {"image_id": 17, "category_id": 1, "bbox": [935.2073364257812, 169.26023864746094, 232.43243408203125, 424.28968811035156], "score": 0.11661647260189056}, {"image_id": 17, "category_id": 1, "bbox": [629.4063110351562, 335.4634704589844, 257.92633056640625, 350.2262878417969], "score": 0.10340486466884613}, {"image_id": 17, "category_id": 1, "bbox": [749.559326171875, 0.0, 241.2904052734375, 339.8282165527344], "score": 0.09998531639575958}, {"image_id": 17, "category_id": 1, "bbox": [649.9315795898438, 1.5781688690185547, 234.62567138671875, 405.3778553009033], "score": 0.07353632152080536}, {"image_id": 17, "category_id": 1, "bbox": [220.46530151367188, 16.119415283203125, 706.1587219238281, 646.5362854003906], "score": 0.07246625423431396}, {"image_id": 17, "category_id": 1, "bbox": [1062.8057861328125, 0.0, 217.1942138671875, 405.80230712890625], "score": 
0.061979375779628754}, {"image_id": 17, "category_id": 1, "bbox": [0.0, 12.36947250366211, 673.8937377929688, 643.7549171447754], "score": 0.05940578505396843}, {"image_id": 17, "category_id": 1, "bbox": [0.0, 249.1837158203125, 272.4990234375, 405.008056640625], "score": 0.05263384431600571}, {"image_id": 17, "category_id": 1, "bbox": [898.4617309570312, 277.068115234375, 244.45501708984375, 401.90557861328125], "score": 0.045380767434835434}, {"image_id": 17, "category_id": 1, "bbox": [172.5482940673828, 8.043331146240234, 238.5835418701172, 367.1763343811035], "score": 0.04498082026839256}, {"image_id": 17, "category_id": 1, "bbox": [971.9525146484375, 171.70254516601562, 288.626708984375, 421.0378723144531], "score": 0.043765194714069366}, {"image_id": 17, "category_id": 2, "bbox": [630.9801025390625, 304.4479064941406, 256.20867919921875, 350.9513854980469], "score": 0.5174615979194641}, {"image_id": 17, "category_id": 2, "bbox": [577.1460571289062, 356.1707458496094, 290.5831298828125, 363.8292541503906], "score": 0.3356853425502777}, {"image_id": 17, "category_id": 2, "bbox": [486.0220947265625, 315.5044860839844, 304.77557373046875, 399.0628356933594], "score": 0.15924642980098724}, {"image_id": 17, "category_id": 2, "bbox": [714.7317504882812, 322.758544921875, 298.15625, 388.2252197265625], "score": 0.1313621550798416}, {"image_id": 17, "category_id": 2, "bbox": [653.6527709960938, 378.295654296875, 278.6302490234375, 341.704345703125], "score": 0.10013458877801895}, {"image_id": 17, "category_id": 2, "bbox": [635.8159790039062, 119.80559539794922, 223.22113037109375, 392.0254898071289], "score": 0.09184145927429199}, {"image_id": 17, "category_id": 2, "bbox": [784.444580078125, 285.92376708984375, 310.5576171875, 384.984619140625], "score": 0.08902592957019806}, {"image_id": 17, "category_id": 2, "bbox": [894.9860229492188, 196.26048278808594, 260.03582763671875, 431.98939514160156], "score": 0.083260178565979}, {"image_id": 17, "category_id": 2, "bbox": [405.90252685546875, 327.72161865234375, 315.02899169921875, 377.632568359375], "score": 0.0821080356836319}, {"image_id": 17, "category_id": 2, "bbox": [717.9171142578125, 224.39137268066406, 297.1212158203125, 379.5074920654297], "score": 0.0792774185538292}, {"image_id": 17, "category_id": 2, "bbox": [171.41940307617188, 0.0, 246.62545776367188, 337.21978759765625], "score": 0.07741939276456833}, {"image_id": 17, "category_id": 2, "bbox": [583.5745239257812, 182.13357543945312, 270.9461669921875, 450.6684875488281], "score": 0.07223288714885712}, {"image_id": 17, "category_id": 2, "bbox": [270.70709228515625, 4.814603805541992, 301.80389404296875, 378.20532417297363], "score": 0.07145408540964127}, {"image_id": 17, "category_id": 2, "bbox": [197.48268127441406, 30.66012954711914, 293.50706481933594, 388.483699798584], "score": 0.06645352393388748}, {"image_id": 17, "category_id": 2, "bbox": [953.2916259765625, 126.78662109375, 211.3280029296875, 376.10894775390625], "score": 0.06536447256803513}, {"image_id": 17, "category_id": 2, "bbox": [650.2095947265625, 190.6712646484375, 254.9752197265625, 366.7265625], "score": 0.06260684132575989}, {"image_id": 17, "category_id": 2, "bbox": [865.6744995117188, 83.19693756103516, 271.80584716796875, 386.0145797729492], "score": 0.0585046149790287}, {"image_id": 17, "category_id": 2, "bbox": [717.5349731445312, 457.3734436035156, 321.51715087890625, 262.6265563964844], "score": 0.058368369936943054}, {"image_id": 17, "category_id": 2, "bbox": [166.24380493164062, 74.64262390136719, 
251.68399047851562, 374.2623748779297], "score": 0.05669455975294113}, {"image_id": 17, "category_id": 2, "bbox": [225.90716552734375, 86.00993347167969, 713.5375366210938, 609.7153472900391], "score": 0.05652741715312004}, {"image_id": 17, "category_id": 2, "bbox": [749.559326171875, 0.0, 241.2904052734375, 339.8282165527344], "score": 0.05514882132411003}, {"image_id": 17, "category_id": 2, "bbox": [784.1671142578125, 178.33700561523438, 303.2344970703125, 391.2945251464844], "score": 0.055009666830301285}, {"image_id": 17, "category_id": 2, "bbox": [506.117431640625, 202.08445739746094, 295.8721923828125, 400.1117706298828], "score": 0.054927535355091095}, {"image_id": 17, "category_id": 2, "bbox": [715.8388671875, 0.0, 185.0753173828125, 344.12493896484375], "score": 0.054640788584947586}, {"image_id": 17, "category_id": 2, "bbox": [806.4759521484375, 36.967037200927734, 259.1456298828125, 401.44714736938477], "score": 0.0536687895655632}, {"image_id": 17, "category_id": 2, "bbox": [665.4571533203125, 27.950180053710938, 260.6925048828125, 487.03688049316406], "score": 0.053559597581624985}, {"image_id": 17, "category_id": 2, "bbox": [851.7698974609375, 432.2514953613281, 327.548583984375, 287.7485046386719], "score": 0.05186959356069565}, {"image_id": 17, "category_id": 2, "bbox": [494.26812744140625, 82.8377685546875, 294.91619873046875, 446.00677490234375], "score": 0.05167414993047714}, {"image_id": 17, "category_id": 2, "bbox": [888.5569458007812, 318.78765869140625, 246.90643310546875, 388.46478271484375], "score": 0.0500541515648365}, {"image_id": 17, "category_id": 2, "bbox": [931.2529907226562, 44.27381896972656, 253.06597900390625, 389.60023498535156], "score": 0.049509596079587936}, {"image_id": 17, "category_id": 2, "bbox": [971.9525146484375, 171.70254516601562, 288.626708984375, 421.0378723144531], "score": 0.04890446364879608}, {"image_id": 17, "category_id": 2, "bbox": [567.5257568359375, 28.134441375732422, 272.3167724609375, 422.8055610656738], "score": 0.045953068882226944}, {"image_id": 17, "category_id": 2, "bbox": [337.26605224609375, 290.9526672363281, 320.52227783203125, 390.6483459472656], "score": 0.04594787582755089}, {"image_id": 17, "category_id": 2, "bbox": [70.4974365234375, 0.0, 333.2474365234375, 272.85894775390625], "score": 0.04589153453707695}, {"image_id": 17, "category_id": 2, "bbox": [1012.574462890625, 88.14291381835938, 241.441162109375, 383.6963195800781], "score": 0.0455973744392395}, {"image_id": 17, "category_id": 2, "bbox": [192.00619506835938, 0.0, 312.0709533691406, 278.24749755859375], "score": 0.04557652771472931}, {"image_id": 17, "category_id": 2, "bbox": [649.9315795898438, 1.5781688690185547, 234.62567138671875, 405.3778553009033], "score": 0.04528730362653732}, {"image_id": 17, "category_id": 2, "bbox": [10.517120361328125, 38.78850173950195, 299.5752258300781, 380.0562858581543], "score": 0.04528297483921051}, {"image_id": 17, "category_id": 2, "bbox": [731.576171875, 65.81037139892578, 268.66357421875, 421.1527633666992], "score": 0.04505917429924011}, {"image_id": 17, "category_id": 2, "bbox": [642.6970825195312, 349.2799377441406, 213.14508056640625, 179.75234985351562], "score": 0.044898033142089844}, {"image_id": 17, "category_id": 2, "bbox": [329.13507080078125, 0.0, 321.82098388671875, 307.01190185546875], "score": 0.04432306066155434}, {"image_id": 17, "category_id": 2, "bbox": [645.3021240234375, 398.8849792480469, 211.345458984375, 188.41348266601562], "score": 0.044149335473775864}, {"image_id": 17, "category_id": 2, 
"bbox": [934.9067993164062, 271.3568115234375, 240.04302978515625, 405.5218505859375], "score": 0.04301426559686661}, {"image_id": 17, "category_id": 2, "bbox": [507.5098876953125, 469.2772216796875, 368.087890625, 250.7227783203125], "score": 0.042834002524614334}, {"image_id": 17, "category_id": 2, "bbox": [610.6517944335938, 454.7460021972656, 216.77313232421875, 187.66073608398438], "score": 0.04094582423567772}, {"image_id": 17, "category_id": 2, "bbox": [257.7956848144531, 0.0, 323.9141540527344, 235.07203674316406], "score": 0.04053114354610443}, {"image_id": 17, "category_id": 2, "bbox": [101.74837493896484, 112.40280151367188, 271.38672637939453, 381.11151123046875], "score": 0.03994482010602951}, {"image_id": 17, "category_id": 2, "bbox": [0.0, 85.29908752441406, 683.4906005859375, 603.5602874755859], "score": 0.039892133325338364}, {"image_id": 17, "category_id": 3, "bbox": [630.9801025390625, 304.4479064941406, 256.20867919921875, 350.9513854980469], "score": 0.5536020398139954}, {"image_id": 17, "category_id": 3, "bbox": [639.969970703125, 161.40650939941406, 212.0770263671875, 366.4171600341797], "score": 0.22279545664787292}, {"image_id": 17, "category_id": 3, "bbox": [648.2750854492188, 360.5600280761719, 272.77349853515625, 359.4399719238281], "score": 0.19095320999622345}, {"image_id": 17, "category_id": 3, "bbox": [172.5482940673828, 8.043331146240234, 238.5835418701172, 367.1763343811035], "score": 0.16645850241184235}, {"image_id": 17, "category_id": 3, "bbox": [935.2073364257812, 169.26023864746094, 232.43243408203125, 424.28968811035156], "score": 0.13749413192272186}, {"image_id": 17, "category_id": 3, "bbox": [196.51524353027344, 0.0, 302.51576232910156, 341.0050354003906], "score": 0.07922226190567017}, {"image_id": 17, "category_id": 3, "bbox": [709.6200561523438, 261.689697265625, 307.89202880859375, 370.582275390625], "score": 0.07816632091999054}, {"image_id": 17, "category_id": 3, "bbox": [898.4617309570312, 277.068115234375, 244.45501708984375, 401.90557861328125], "score": 0.06438654661178589}, {"image_id": 17, "category_id": 3, "bbox": [713.7442016601562, 0.0, 191.927734375, 304.2700500488281], "score": 0.06319107115268707}, {"image_id": 17, "category_id": 3, "bbox": [220.46530151367188, 16.119415283203125, 706.1587219238281, 646.5362854003906], "score": 0.061385150998830795}, {"image_id": 17, "category_id": 3, "bbox": [614.8065185546875, 348.57147216796875, 210.50653076171875, 179.80682373046875], "score": 0.05763237550854683}, {"image_id": 17, "category_id": 3, "bbox": [270.70709228515625, 4.814603805541992, 301.80389404296875, 378.20532417297363], "score": 0.05448024347424507}, {"image_id": 17, "category_id": 3, "bbox": [677.8447875976562, 350.2130432128906, 208.91082763671875, 178.68417358398438], "score": 0.054262783378362656}, {"image_id": 17, "category_id": 3, "bbox": [645.47607421875, 417.91058349609375, 208.921875, 186.6566162109375], "score": 0.053319111466407776}, {"image_id": 17, "category_id": 3, "bbox": [545.8950805664062, 456.78607177734375, 216.73193359375, 184.26055908203125], "score": 0.049809250980615616}, {"image_id": 17, "category_id": 3, "bbox": [534.0252685546875, 107.39248657226562, 584.8946533203125, 585.5376281738281], "score": 0.04918060451745987}, {"image_id": 17, "category_id": 3, "bbox": [745.5537719726562, 0.0, 245.9190673828125, 303.872314453125], "score": 0.049087103456258774}, {"image_id": 17, "category_id": 3, "bbox": [197.490234375, 64.81208801269531, 308.07598876953125, 394.00917053222656], "score": 
0.04856761917471886}, {"image_id": 17, "category_id": 3, "bbox": [644.5038452148438, 473.1451110839844, 212.9134521484375, 187.43228149414062], "score": 0.04817144572734833}, {"image_id": 17, "category_id": 3, "bbox": [741.2021484375, 368.3436279296875, 212.21832275390625, 177.15264892578125], "score": 0.04730736091732979}, {"image_id": 17, "category_id": 3, "bbox": [579.9584350585938, 420.83782958984375, 212.67706298828125, 182.59136962890625], "score": 0.047271545976400375}, {"image_id": 17, "category_id": 3, "bbox": [125.73802185058594, 80.34439086914062, 242.9285430908203, 371.5462951660156], "score": 0.046917397528886795}, {"image_id": 17, "category_id": 3, "bbox": [784.444580078125, 285.92376708984375, 310.5576171875, 384.984619140625], "score": 0.04621605947613716}, {"image_id": 17, "category_id": 3, "bbox": [971.9525146484375, 171.70254516601562, 288.626708984375, 421.0378723144531], "score": 0.04553968459367752}, {"image_id": 17, "category_id": 3, "bbox": [68.28518676757812, 183.92642211914062, 760.1006164550781, 536.0735778808594], "score": 0.0444181002676487}, {"image_id": 17, "category_id": 3, "bbox": [946.100341796875, 86.7597885131836, 225.35888671875, 382.72962188720703], "score": 0.04431023821234703}, {"image_id": 17, "category_id": 3, "bbox": [673.0211181640625, 294.3951416015625, 211.7867431640625, 183.25991821289062], "score": 0.04409076273441315}, {"image_id": 17, "category_id": 3, "bbox": [0.0, 12.36947250366211, 673.8937377929688, 643.7549171447754], "score": 0.04342995584011078}, {"image_id": 17, "category_id": 3, "bbox": [784.1671142578125, 178.33700561523438, 303.2344970703125, 391.2945251464844], "score": 0.04302601516246796}, {"image_id": 17, "category_id": 3, "bbox": [876.5825805664062, 118.92588806152344, 266.08087158203125, 390.29615783691406], "score": 0.04283034801483154}, {"image_id": 17, "category_id": 3, "bbox": [706.8301391601562, 387.5335998535156, 211.49652099609375, 177.88278198242188], "score": 0.04261484742164612}, {"image_id": 17, "category_id": 3, "bbox": [488.0187072753906, 284.7178649902344, 304.5642395019531, 394.5537414550781], "score": 0.04234609007835388}, {"image_id": 17, "category_id": 3, "bbox": [544.93994140625, 348.9957275390625, 217.6666259765625, 184.4595947265625], "score": 0.04165088012814522}, {"image_id": 17, "category_id": 3, "bbox": [645.6270141601562, 220.14573669433594, 209.63946533203125, 185.7073516845703], "score": 0.04131752997636795}, {"image_id": 17, "category_id": 3, "bbox": [966.107666015625, 244.1626739501953, 210.282958984375, 175.5561065673828], "score": 0.04065334051847458}, {"image_id": 17, "category_id": 3, "bbox": [1053.5390625, 83.8666763305664, 226.4609375, 386.83870697021484], "score": 0.04045395180583}, {"image_id": 17, "category_id": 3, "bbox": [610.4306030273438, 293.4707336425781, 213.40228271484375, 184.33670043945312], "score": 0.04007026180624962}, {"image_id": 17, "category_id": 3, "bbox": [703.7232666015625, 24.22433853149414, 208.80230712890625, 431.1375389099121], "score": 0.039664097130298615}, {"image_id": 18, "category_id": 1, "bbox": [378.3367614746094, 195.92410278320312, 208.42550659179688, 371.0279235839844], "score": 0.7623647451400757}, {"image_id": 18, "category_id": 1, "bbox": [394.0413513183594, 219.8179473876953, 276.2051696777344, 373.9988250732422], "score": 0.2814333736896515}, {"image_id": 18, "category_id": 1, "bbox": [703.5492553710938, 300.6255798339844, 230.95159912109375, 362.0179748535156], "score": 0.19475990533828735}, {"image_id": 18, "category_id": 1, "bbox": 
[638.1650390625, 303.7394104003906, 207.6556396484375, 359.9249572753906], "score": 0.16748231649398804}, {"image_id": 18, "category_id": 1, "bbox": [902.94921875, 198.24871826171875, 256.4578857421875, 437.72418212890625], "score": 0.10810670256614685}, {"image_id": 18, "category_id": 1, "bbox": [106.20220947265625, 18.539634704589844, 645.5755004882812, 633.1562271118164], "score": 0.07626570016145706}, {"image_id": 18, "category_id": 1, "bbox": [1064.8153076171875, 0.0, 215.1846923828125, 404.5569152832031], "score": 0.0733998715877533}, {"image_id": 18, "category_id": 1, "bbox": [504.9007568359375, 23.01079559326172, 678.9638671875, 609.4094314575195], "score": 0.056008074432611465}, {"image_id": 18, "category_id": 1, "bbox": [378.7657470703125, 72.67706298828125, 216.26708984375, 407.83282470703125], "score": 0.051660556346178055}, {"image_id": 18, "category_id": 1, "bbox": [0.0, 247.5580596923828, 269.91741943359375, 404.1985321044922], "score": 0.04924057051539421}, {"image_id": 18, "category_id": 1, "bbox": [0.0, 9.866340637207031, 529.7532958984375, 626.3154830932617], "score": 0.04732197895646095}, {"image_id": 18, "category_id": 1, "bbox": [878.2665405273438, 153.4778594970703, 249.14617919921875, 386.47544860839844], "score": 0.04477604106068611}, {"image_id": 18, "category_id": 1, "bbox": [283.2855529785156, 96.89364624023438, 619.3069152832031, 591.5948791503906], "score": 0.04313509911298752}, {"image_id": 18, "category_id": 1, "bbox": [960.3525390625, 198.50619506835938, 307.614990234375, 435.2538757324219], "score": 0.03770344331860542}, {"image_id": 18, "category_id": 1, "bbox": [631.6859130859375, 401.4652404785156, 234.12554931640625, 318.5347595214844], "score": 0.0363168902695179}, {"image_id": 18, "category_id": 1, "bbox": [143.0811767578125, 250.8874969482422, 626.2338256835938, 469.1125030517578], "score": 0.03596845641732216}, {"image_id": 18, "category_id": 1, "bbox": [679.914306640625, 0.0, 575.9248046875, 578.9341430664062], "score": 0.034295275807380676}, {"image_id": 18, "category_id": 1, "bbox": [397.08233642578125, 484.69036865234375, 324.33709716796875, 235.30963134765625], "score": 0.03308970853686333}, {"image_id": 18, "category_id": 1, "bbox": [691.329345703125, 168.05421447753906, 241.89947509765625, 438.4893035888672], "score": 0.03301386907696724}, {"image_id": 18, "category_id": 1, "bbox": [236.0262451171875, 323.4324951171875, 691.5599365234375, 396.5675048828125], "score": 0.03274068236351013}, {"image_id": 18, "category_id": 2, "bbox": [324.1338806152344, 453.7413024902344, 357.1318664550781, 266.2586975097656], "score": 0.471381276845932}, {"image_id": 18, "category_id": 2, "bbox": [393.1677551269531, 373.07916259765625, 327.1702575683594, 346.92083740234375], "score": 0.38381755352020264}, {"image_id": 18, "category_id": 2, "bbox": [455.2711486816406, 456.24847412109375, 291.3869323730469, 263.75152587890625], "score": 0.31027480959892273}, {"image_id": 18, "category_id": 2, "bbox": [302.7915954589844, 315.87127685546875, 297.3692321777344, 404.12872314453125], "score": 0.14117804169654846}, {"image_id": 18, "category_id": 2, "bbox": [378.656005859375, 160.03268432617188, 205.98883056640625, 391.1166687011719], "score": 0.09555286914110184}, {"image_id": 18, "category_id": 2, "bbox": [465.8782958984375, 296.70782470703125, 293.570556640625, 378.60223388671875], "score": 0.07416098564863205}, {"image_id": 18, "category_id": 2, "bbox": [610.6525268554688, 437.3165588378906, 274.17864990234375, 282.6834411621094], "score": 0.07159018516540527}, 
{"image_id": 18, "category_id": 2, "bbox": [709.1884155273438, 347.5819396972656, 220.23516845703125, 345.4731140136719], "score": 0.0711197555065155}, {"image_id": 18, "category_id": 2, "bbox": [642.3698120117188, 351.15057373046875, 197.2906494140625, 341.7117919921875], "score": 0.06710785627365112}, {"image_id": 18, "category_id": 2, "bbox": [244.88284301757812, 432.4959716796875, 286.3034973144531, 287.5040283203125], "score": 0.06679915636777878}, {"image_id": 18, "category_id": 2, "bbox": [530.710693359375, 373.3023681640625, 302.92864990234375, 346.6976318359375], "score": 0.06543987989425659}, {"image_id": 18, "category_id": 2, "bbox": [298.15985107421875, 489.4255676269531, 285.55010986328125, 230.57443237304688], "score": 0.06081177666783333}, {"image_id": 18, "category_id": 2, "bbox": [668.7447509765625, 471.92266845703125, 282.7177734375, 248.07733154296875], "score": 0.060106538236141205}, {"image_id": 18, "category_id": 2, "bbox": [732.7022705078125, 446.1139831542969, 282.23028564453125, 273.8860168457031], "score": 0.055007316172122955}, {"image_id": 18, "category_id": 2, "bbox": [136.74020385742188, 402.15679931640625, 339.7002258300781, 317.84320068359375], "score": 0.05067078769207001}, {"image_id": 18, "category_id": 2, "bbox": [238.60604858398438, 113.80126190185547, 287.4553527832031, 453.88123321533203], "score": 0.04947389289736748}, {"image_id": 18, "category_id": 2, "bbox": [525.149658203125, 506.5279846191406, 337.944580078125, 213.47201538085938], "score": 0.048784639686346054}, {"image_id": 18, "category_id": 2, "bbox": [388.677734375, 197.03233337402344, 285.5111083984375, 371.14747619628906], "score": 0.048598356544971466}, {"image_id": 18, "category_id": 2, "bbox": [543.1112670898438, 241.01011657714844, 276.91876220703125, 411.30714416503906], "score": 0.04650072753429413}, {"image_id": 18, "category_id": 2, "bbox": [698.9453125, 202.88929748535156, 236.256591796875, 431.63279724121094], "score": 0.04538002982735634}, {"image_id": 18, "category_id": 2, "bbox": [59.82391357421875, 0.0, 361.60150146484375, 282.5313415527344], "score": 0.043511804193258286}, {"image_id": 18, "category_id": 2, "bbox": [784.7247314453125, 470.38568115234375, 322.786865234375, 249.61431884765625], "score": 0.043339043855667114}, {"image_id": 18, "category_id": 2, "bbox": [621.1810302734375, 202.1890106201172, 245.0223388671875, 431.4572296142578], "score": 0.04252912476658821}, {"image_id": 18, "category_id": 2, "bbox": [718.5948486328125, 0.0, 318.3511962890625, 324.77899169921875], "score": 0.03994551673531532}, {"image_id": 18, "category_id": 2, "bbox": [473.9389953613281, 145.9246826171875, 281.4597473144531, 464.72149658203125], "score": 0.03973711282014847}, {"image_id": 18, "category_id": 2, "bbox": [860.4546508789062, 439.349609375, 313.93988037109375, 280.650390625], "score": 0.03860880807042122}, {"image_id": 18, "category_id": 2, "bbox": [267.34039306640625, 0.0, 331.1688232421875, 331.5859680175781], "score": 0.03795885294675827}, {"image_id": 18, "category_id": 2, "bbox": [164.09274291992188, 70.21791076660156, 274.2289123535156, 381.5216827392578], "score": 0.036958374083042145}, {"image_id": 18, "category_id": 2, "bbox": [236.53695678710938, 280.36090087890625, 302.4324035644531, 400.855712890625], "score": 0.036756087094545364}, {"image_id": 18, "category_id": 2, "bbox": [195.22695922851562, 0.0, 314.8653869628906, 361.2481384277344], "score": 0.036553796380758286}, {"image_id": 18, "category_id": 2, "bbox": [788.4056396484375, 336.8262634277344, 
290.767333984375, 360.7493591308594], "score": 0.03619789332151413}, {"image_id": 18, "category_id": 2, "bbox": [387.63623046875, 495.8001403808594, 222.51025390625, 176.47427368164062], "score": 0.035783618688583374}, {"image_id": 18, "category_id": 2, "bbox": [905.9371337890625, 307.74468994140625, 228.9212646484375, 401.012939453125], "score": 0.0355747826397419}, {"image_id": 18, "category_id": 2, "bbox": [0.0, 321.8049011230469, 660.8051147460938, 398.1950988769531], "score": 0.03491321578621864}, {"image_id": 18, "category_id": 2, "bbox": [283.2855529785156, 96.89364624023438, 619.3069152832031, 591.5948791503906], "score": 0.0347018800675869}, {"image_id": 18, "category_id": 2, "bbox": [105.73907470703125, 0.0, 689.4277954101562, 320.4985046386719], "score": 0.034700021147727966}, {"image_id": 18, "category_id": 2, "bbox": [0.0, 9.412090301513672, 388.7626647949219, 617.0921211242676], "score": 0.034652210772037506}, {"image_id": 18, "category_id": 2, "bbox": [626.885986328125, 0.0, 247.58837890625, 318.8390808105469], "score": 0.03447321429848671}, {"image_id": 18, "category_id": 2, "bbox": [79.10627746582031, 468.3641052246094, 328.0294647216797, 251.63589477539062], "score": 0.03376388177275658}, {"image_id": 18, "category_id": 2, "bbox": [728.5347290039062, 284.3084411621094, 269.26611328125, 402.9983825683594], "score": 0.03374774008989334}, {"image_id": 18, "category_id": 2, "bbox": [23.95074462890625, 99.69330596923828, 621.4078369140625, 593.5692672729492], "score": 0.033665623515844345}, {"image_id": 18, "category_id": 2, "bbox": [456.7265625, 409.7964172363281, 727.149658203125, 310.2035827636719], "score": 0.03351181373000145}, {"image_id": 18, "category_id": 2, "bbox": [653.7388916015625, 0.0, 309.4835205078125, 402.42266845703125], "score": 0.033268481492996216}, {"image_id": 18, "category_id": 2, "bbox": [644.80126953125, 237.69895935058594, 635.19873046875, 482.30104064941406], "score": 0.033236123621463776}, {"image_id": 18, "category_id": 2, "bbox": [484.02496337890625, 493.10845947265625, 214.7664794921875, 184.646728515625], "score": 0.03316330537199974}, {"image_id": 18, "category_id": 2, "bbox": [324.50555419921875, 74.836669921875, 246.2037353515625, 409.7110290527344], "score": 0.03297832980751991}, {"image_id": 18, "category_id": 2, "bbox": [329.71966552734375, 237.7452392578125, 740.9048461914062, 482.2547607421875], "score": 0.032668791711330414}, {"image_id": 18, "category_id": 2, "bbox": [106.55457305908203, 105.35771942138672, 280.52034759521484, 397.0501480102539], "score": 0.0324617438018322}, {"image_id": 18, "category_id": 2, "bbox": [0.0, 0.0, 272.40753173828125, 248.42428588867188], "score": 0.032403916120529175}, {"image_id": 18, "category_id": 3, "bbox": [320.6970520019531, 371.225830078125, 356.3555603027344, 348.774169921875], "score": 0.15996046364307404}, {"image_id": 18, "category_id": 3, "bbox": [378.3367614746094, 195.92410278320312, 208.42550659179688, 371.0279235839844], "score": 0.1563960164785385}, {"image_id": 18, "category_id": 3, "bbox": [448.41815185546875, 418.72076416015625, 296.75396728515625, 301.27923583984375], "score": 0.11423150449991226}, {"image_id": 18, "category_id": 3, "bbox": [638.1650390625, 303.7394104003906, 207.6556396484375, 359.9249572753906], "score": 0.06795909255743027}, {"image_id": 18, "category_id": 3, "bbox": [255.18463134765625, 238.7335205078125, 654.9059448242188, 481.2664794921875], "score": 0.06526646018028259}, {"image_id": 18, "category_id": 3, "bbox": [703.5492553710938, 300.6255798339844, 
230.95159912109375, 362.0179748535156], "score": 0.05650874227285385}, {"image_id": 18, "category_id": 3, "bbox": [325.7540588378906, 491.51287841796875, 212.68966674804688, 186.75555419921875], "score": 0.052141398191452026}, {"image_id": 18, "category_id": 3, "bbox": [457.0799255371094, 491.18994140625, 214.71133422851562, 187.14208984375], "score": 0.049471933394670486}, {"image_id": 18, "category_id": 3, "bbox": [387.63623046875, 495.8001403808594, 222.51025390625, 176.47427368164062], "score": 0.0461709201335907}, {"image_id": 18, "category_id": 3, "bbox": [106.20220947265625, 18.539634704589844, 645.5755004882812, 633.1562271118164], "score": 0.045348431915044785}, {"image_id": 18, "category_id": 3, "bbox": [930.6183471679688, 164.83251953125, 238.29632568359375, 442.7421875], "score": 0.04329783096909523}, {"image_id": 18, "category_id": 3, "bbox": [353.29913330078125, 526.72314453125, 223.05828857421875, 191.20758056640625], "score": 0.04234568402171135}, {"image_id": 18, "category_id": 3, "bbox": [475.33349609375, 179.2575225830078, 704.500732421875, 540.7424774169922], "score": 0.04073738306760788}, {"image_id": 18, "category_id": 3, "bbox": [289.81903076171875, 528.37255859375, 217.27886962890625, 187.207275390625], "score": 0.040078405290842056}, {"image_id": 18, "category_id": 3, "bbox": [485.51446533203125, 528.914794921875, 215.72998046875, 187.55242919921875], "score": 0.04004266485571861}, {"image_id": 18, "category_id": 3, "bbox": [394.0413513183594, 219.8179473876953, 276.2051696777344, 373.9988250732422], "score": 0.039882756769657135}, {"image_id": 18, "category_id": 3, "bbox": [389.6073913574219, 219.47677612304688, 211.39059448242188, 187.49356079101562], "score": 0.03967898339033127}, {"image_id": 18, "category_id": 3, "bbox": [511.8871154785156, 497.6292419433594, 218.44497680664062, 176.11117553710938], "score": 0.036932192742824554}, {"image_id": 18, "category_id": 3, "bbox": [612.476318359375, 408.1797790527344, 213.99462890625, 172.42745971679688], "score": 0.03634930029511452}, {"image_id": 18, "category_id": 3, "bbox": [415.9002380371094, 547.1441040039062, 221.84854125976562, 172.85589599609375], "score": 0.035592641681432724}, {"image_id": 18, "category_id": 3, "bbox": [122.76187133789062, 323.58416748046875, 663.1677551269531, 396.41583251953125], "score": 0.03556632623076439}, {"image_id": 18, "category_id": 3, "bbox": [680.2394409179688, 387.1216735839844, 215.47357177734375, 179.10140991210938], "score": 0.03538840636610985}, {"image_id": 18, "category_id": 3, "bbox": [258.97711181640625, 496.557373046875, 216.263427734375, 176.9794921875], "score": 0.03474612161517143}, {"image_id": 18, "category_id": 3, "bbox": [15.03466796875, 250.7554931640625, 629.3453369140625, 469.2445068359375], "score": 0.03322818502783775}, {"image_id": 18, "category_id": 3, "bbox": [929.8927612304688, 332.947265625, 216.54315185546875, 178.98568725585938], "score": 0.03320501744747162}, {"image_id": 18, "category_id": 3, "bbox": [391.31072998046875, 311.4112548828125, 212.38775634765625, 182.62442016601562], "score": 0.03311631828546524}, {"image_id": 18, "category_id": 3, "bbox": [296.27508544921875, 463.3774719238281, 210.382080078125, 171.62008666992188], "score": 0.03304464742541313}, {"image_id": 18, "category_id": 3, "bbox": [679.1895751953125, 460.39923095703125, 213.44403076171875, 178.41015625], "score": 0.032768744975328445}, {"image_id": 18, "category_id": 3, "bbox": [225.91680908203125, 104.71916961669922, 216.26199340820312, 168.42151641845703], "score": 
0.03266642615199089}, {"image_id": 18, "category_id": 3, "bbox": [610.039794921875, 463.2660827636719, 216.26654052734375, 173.37515258789062], "score": 0.032444730401039124}, {"image_id": 18, "category_id": 3, "bbox": [354.6922912597656, 463.66717529296875, 217.70217895507812, 169.59228515625], "score": 0.032367657870054245}, {"image_id": 19, "category_id": 1, "bbox": [976.2610473632812, 231.10968017578125, 340.45953369140625, 556.9378051757812], "score": 0.2779507339000702}, {"image_id": 19, "category_id": 1, "bbox": [1046.191162109375, 75.35765075683594, 307.9295654296875, 579.7418365478516], "score": 0.23956657946109772}, {"image_id": 19, "category_id": 1, "bbox": [987.165771484375, 7.469252586364746, 317.4222412109375, 602.380295753479], "score": 0.18107813596725464}, {"image_id": 19, "category_id": 1, "bbox": [1088.55224609375, 72.1147232055664, 396.49365234375, 587.5006332397461], "score": 0.11799919605255127}, {"image_id": 19, "category_id": 1, "bbox": [0.0, 8.323379516601562, 982.7437744140625, 980.5385589599609], "score": 0.07315238565206528}, {"image_id": 19, "category_id": 1, "bbox": [1056.4940185546875, 225.6650848388672, 447.698974609375, 565.0355987548828], "score": 0.07173121720552444}, {"image_id": 19, "category_id": 1, "bbox": [1024.0103759765625, 285.7401123046875, 342.6749267578125, 606.516357421875], "score": 0.05377933382987976}, {"image_id": 19, "category_id": 1, "bbox": [188.1573486328125, 22.721872329711914, 382.71881103515625, 530.4128932952881], "score": 0.050310444086790085}, {"image_id": 19, "category_id": 1, "bbox": [0.0, 347.1096496582031, 751.083251953125, 732.8903503417969], "score": 0.04764668643474579}, {"image_id": 19, "category_id": 1, "bbox": [0.0, 470.080810546875, 387.5464782714844, 609.919189453125], "score": 0.04587952792644501}, {"image_id": 19, "category_id": 1, "bbox": [871.1122436523438, 65.80194091796875, 393.05804443359375, 564.6244506835938], "score": 0.04100384563207626}, {"image_id": 19, "category_id": 1, "bbox": [181.70640563964844, 165.7935028076172, 384.0589141845703, 572.8975372314453], "score": 0.03998097777366638}, {"image_id": 19, "category_id": 1, "bbox": [837.4260864257812, 0.0, 904.5148315429688, 848.1788940429688], "score": 0.03972133994102478}, {"image_id": 19, "category_id": 2, "bbox": [976.08056640625, 189.02584838867188, 340.8729248046875, 570.3514099121094], "score": 0.21593865752220154}, {"image_id": 19, "category_id": 2, "bbox": [1056.4940185546875, 225.6650848388672, 447.698974609375, 565.0355987548828], "score": 0.10331302881240845}, {"image_id": 19, "category_id": 2, "bbox": [1046.191162109375, 75.35765075683594, 307.9295654296875, 579.7418365478516], "score": 0.09513430297374725}, {"image_id": 19, "category_id": 2, "bbox": [1024.0103759765625, 285.7401123046875, 342.6749267578125, 606.516357421875], "score": 0.07224012166261673}, {"image_id": 19, "category_id": 2, "bbox": [144.45669555664062, 265.9378967285156, 419.5923767089844, 596.5319519042969], "score": 0.07073138654232025}, {"image_id": 19, "category_id": 2, "bbox": [870.2540283203125, 124.25701904296875, 410.005859375, 569.1414794921875], "score": 0.06735281646251678}, {"image_id": 19, "category_id": 2, "bbox": [987.165771484375, 7.469252586364746, 317.4222412109375, 602.380295753479], "score": 0.06652039289474487}, {"image_id": 19, "category_id": 2, "bbox": [962.03125, 353.3863220214844, 338.3389892578125, 612.0702819824219], "score": 0.0662231296300888}, {"image_id": 19, "category_id": 2, "bbox": [1088.55224609375, 72.1147232055664, 396.49365234375, 
587.5006332397461], "score": 0.06581012904644012}, {"image_id": 19, "category_id": 2, "bbox": [191.88743591308594, 118.70028686523438, 374.95985412597656, 575.8288269042969], "score": 0.06514868885278702}, {"image_id": 19, "category_id": 2, "bbox": [247.09913635253906, 216.5903778076172, 391.65977478027344, 579.2156524658203], "score": 0.06197457015514374}, {"image_id": 19, "category_id": 2, "bbox": [880.118408203125, 500.3360595703125, 475.1705322265625, 561.409423828125], "score": 0.06191394105553627}, {"image_id": 19, "category_id": 2, "bbox": [215.90127563476562, 336.9872741699219, 464.7719421386719, 576.9812316894531], "score": 0.05897326394915581}, {"image_id": 19, "category_id": 2, "bbox": [794.968505859375, 592.1873779296875, 480.3814697265625, 487.8126220703125], "score": 0.056223493069410324}, {"image_id": 19, "category_id": 2, "bbox": [595.0888061523438, 587.7406616210938, 504.49468994140625, 492.25933837890625], "score": 0.05611027404665947}, {"image_id": 19, "category_id": 2, "bbox": [998.4468383789062, 428.6310119628906, 404.55303955078125, 581.1002502441406], "score": 0.05570375174283981}, {"image_id": 19, "category_id": 2, "bbox": [48.40180206298828, 213.53280639648438, 429.0369186401367, 588.7864685058594], "score": 0.05508790910243988}, {"image_id": 19, "category_id": 2, "bbox": [1185.9754638671875, 205.64341735839844, 438.3289794921875, 621.4057159423828], "score": 0.05438920855522156}, {"image_id": 19, "category_id": 2, "bbox": [329.97674560546875, 271.6806335449219, 428.1158447265625, 590.3512878417969], "score": 0.05232679843902588}, {"image_id": 19, "category_id": 2, "bbox": [115.6014175415039, 387.76861572265625, 469.0916976928711, 607.750732421875], "score": 0.051268402487039566}, {"image_id": 19, "category_id": 2, "bbox": [679.6268920898438, 695.5230102539062, 540.2052612304688, 384.47698974609375], "score": 0.05016888678073883}, {"image_id": 19, "category_id": 2, "bbox": [72.8744888305664, 71.99635314941406, 442.1996078491211, 562.5988006591797], "score": 0.048838261514902115}, {"image_id": 19, "category_id": 2, "bbox": [303.85498046875, 75.57845306396484, 452.17877197265625, 544.2988662719727], "score": 0.048651471734046936}, {"image_id": 19, "category_id": 2, "bbox": [897.4072875976562, 699.4364013671875, 476.56121826171875, 380.5635986328125], "score": 0.048538923263549805}, {"image_id": 19, "category_id": 2, "bbox": [0.0, 161.57835388183594, 361.39227294921875, 588.9981842041016], "score": 0.048459600657224655}, {"image_id": 19, "category_id": 2, "bbox": [1088.171630859375, 423.7728576660156, 433.5130615234375, 595.4004821777344], "score": 0.047765277326107025}, {"image_id": 19, "category_id": 2, "bbox": [189.62887573242188, 0.0, 385.8398132324219, 498.7991027832031], "score": 0.04757177457213402}, {"image_id": 19, "category_id": 2, "bbox": [736.7484741210938, 111.84502410888672, 436.04571533203125, 593.1538772583008], "score": 0.04691697657108307}, {"image_id": 19, "category_id": 2, "bbox": [704.844482421875, 443.0769958496094, 466.5421142578125, 571.3669738769531], "score": 0.04672427847981453}, {"image_id": 19, "category_id": 2, "bbox": [847.0346069335938, 284.6083068847656, 420.98614501953125, 623.4518737792969], "score": 0.0463629812002182}, {"image_id": 19, "category_id": 2, "bbox": [986.1791381835938, 594.88671875, 443.06011962890625, 485.11328125], "score": 0.046347614377737045}, {"image_id": 19, "category_id": 2, "bbox": [894.7033081054688, 0.0, 453.66009521484375, 438.53277587890625], "score": 0.04574732854962349}, {"image_id": 19, "category_id": 2, 
"bbox": [1191.1533203125, 66.44688415527344, 408.3033447265625, 586.3166046142578], "score": 0.04561192914843559}, {"image_id": 19, "category_id": 2, "bbox": [495.5445556640625, 678.8156127929688, 519.538330078125, 401.18438720703125], "score": 0.044796884059906006}, {"image_id": 19, "category_id": 2, "bbox": [795.5469360351562, 0.0, 432.13922119140625, 481.9593200683594], "score": 0.044103506952524185}, {"image_id": 19, "category_id": 2, "bbox": [25.030563354492188, 352.156982421875, 426.63417053222656, 564.328125], "score": 0.04356403648853302}, {"image_id": 19, "category_id": 2, "bbox": [423.4324951171875, 215.85400390625, 425.61260986328125, 592.2444458007812], "score": 0.04354160279035568}, {"image_id": 19, "category_id": 2, "bbox": [563.3553466796875, 300.87689208984375, 1017.3701171875, 779.1231079101562], "score": 0.04350592941045761}, {"image_id": 19, "category_id": 2, "bbox": [722.4251098632812, 242.85232543945312, 456.50677490234375, 621.8262023925781], "score": 0.043467629700899124}, {"image_id": 19, "category_id": 2, "bbox": [1305.772705078125, 643.6773071289062, 469.075927734375, 436.32269287109375], "score": 0.041191283613443375}, {"image_id": 19, "category_id": 2, "bbox": [589.2272338867188, 0.0, 1035.3291625976562, 850.8948364257812], "score": 0.04073993116617203}, {"image_id": 19, "category_id": 2, "bbox": [1276.8076171875, 135.4638214111328, 392.14892578125, 561.5039520263672], "score": 0.04065113142132759}, {"image_id": 19, "category_id": 2, "bbox": [187.45436096191406, 463.9432373046875, 523.8433685302734, 616.0567626953125], "score": 0.039810746908187866}, {"image_id": 19, "category_id": 2, "bbox": [1458.4031982421875, 576.9672241210938, 461.5968017578125, 503.03277587890625], "score": 0.03954066336154938}, {"image_id": 19, "category_id": 2, "bbox": [810.8377685546875, 432.6242980957031, 920.5947265625, 647.3757019042969], "score": 0.03941328078508377}, {"image_id": 19, "category_id": 2, "bbox": [1087.4700927734375, 0.0, 434.5146484375, 445.3666076660156], "score": 0.03866986930370331}, {"image_id": 19, "category_id": 2, "bbox": [390.56500244140625, 16.50750732421875, 460.637451171875, 549.4373779296875], "score": 0.03826947510242462}, {"image_id": 19, "category_id": 2, "bbox": [718.1953125, 0.0, 378.001708984375, 460.36151123046875], "score": 0.03780439496040344}, {"image_id": 19, "category_id": 2, "bbox": [531.3228149414062, 265.450439453125, 418.85614013671875, 599.794189453125], "score": 0.037765540182590485}, {"image_id": 19, "category_id": 2, "bbox": [768.834228515625, 770.735595703125, 552.0435791015625, 309.264404296875], "score": 0.03775879740715027}, {"image_id": 19, "category_id": 2, "bbox": [1577.11572265625, 282.9234313964844, 342.88427734375, 656.9679260253906], "score": 0.03748423978686333}, {"image_id": 19, "category_id": 2, "bbox": [418.00335693359375, 390.0381774902344, 450.4788818359375, 575.7168884277344], "score": 0.037407536059617996}, {"image_id": 19, "category_id": 2, "bbox": [1082.3115234375, 646.7191772460938, 467.9766845703125, 433.28082275390625], "score": 0.037359997630119324}, {"image_id": 19, "category_id": 2, "bbox": [1191.4600830078125, 463.9490051269531, 411.068115234375, 613.9685974121094], "score": 0.03725411742925644}, {"image_id": 19, "category_id": 2, "bbox": [620.8862915039062, 378.92718505859375, 425.83941650390625, 588.8250122070312], "score": 0.03708365559577942}, {"image_id": 19, "category_id": 2, "bbox": [1300.2568359375, 0.0, 437.639404296875, 472.1343078613281], "score": 0.036905575543642044}, {"image_id": 19, 
"category_id": 2, "bbox": [282.4977722167969, 0.0, 494.4300842285156, 455.4599304199219], "score": 0.03683048114180565}, {"image_id": 19, "category_id": 2, "bbox": [517.1889038085938, 438.51837158203125, 442.90301513671875, 588.4667358398438], "score": 0.036562975496053696}, {"image_id": 19, "category_id": 3, "bbox": [976.2610473632812, 231.10968017578125, 340.45953369140625, 556.9378051757812], "score": 0.6750579476356506}, {"image_id": 19, "category_id": 3, "bbox": [1056.4940185546875, 225.6650848388672, 447.698974609375, 565.0355987548828], "score": 0.2974667549133301}, {"image_id": 19, "category_id": 3, "bbox": [1046.191162109375, 75.35765075683594, 307.9295654296875, 579.7418365478516], "score": 0.21392305195331573}, {"image_id": 19, "category_id": 3, "bbox": [987.165771484375, 7.469252586364746, 317.4222412109375, 602.380295753479], "score": 0.13943560421466827}, {"image_id": 19, "category_id": 3, "bbox": [1024.0103759765625, 285.7401123046875, 342.6749267578125, 606.516357421875], "score": 0.13899272680282593}, {"image_id": 19, "category_id": 3, "bbox": [1088.55224609375, 72.1147232055664, 396.49365234375, 587.5006332397461], "score": 0.10875702649354935}, {"image_id": 19, "category_id": 3, "bbox": [962.03125, 353.3863220214844, 338.3389892578125, 612.0702819824219], "score": 0.06968238204717636}, {"image_id": 19, "category_id": 3, "bbox": [871.1122436523438, 65.80194091796875, 393.05804443359375, 564.6244506835938], "score": 0.06342918425798416}, {"image_id": 19, "category_id": 3, "bbox": [1020.4742431640625, 382.7237548828125, 313.761474609375, 285.244140625], "score": 0.06207021698355675}, {"image_id": 19, "category_id": 3, "bbox": [188.1573486328125, 22.721872329711914, 382.71881103515625, 530.4128932952881], "score": 0.0614626482129097}, {"image_id": 19, "category_id": 3, "bbox": [1017.5429077148438, 281.44012451171875, 313.80865478515625, 272.09722900390625], "score": 0.056887295097112656}, {"image_id": 19, "category_id": 3, "bbox": [823.3425903320312, 277.83953857421875, 309.16070556640625, 278.90545654296875], "score": 0.05067446082830429}, {"image_id": 19, "category_id": 3, "bbox": [1189.2843017578125, 292.3200378417969, 429.9254150390625, 625.9740905761719], "score": 0.049926768988370895}, {"image_id": 19, "category_id": 3, "bbox": [1112.85595703125, 142.8488311767578, 317.3629150390625, 273.98451232910156], "score": 0.049067363142967224}, {"image_id": 19, "category_id": 3, "bbox": [964.6591186523438, 338.2109680175781, 319.89508056640625, 269.3692321777344], "score": 0.04887603968381882}, {"image_id": 19, "category_id": 3, "bbox": [863.5625610351562, 235.6002655029297, 414.17633056640625, 620.2220611572266], "score": 0.04715320095419884}, {"image_id": 19, "category_id": 3, "bbox": [919.4712524414062, 288.9963073730469, 313.13470458984375, 254.59024047851562], "score": 0.04555204510688782}, {"image_id": 19, "category_id": 3, "bbox": [1065.6463623046875, 444.11859130859375, 323.60693359375, 276.862060546875], "score": 0.04455772042274475}, {"image_id": 19, "category_id": 3, "bbox": [1015.0892944335938, 144.9364013671875, 317.73858642578125, 273.02032470703125], "score": 0.04453849792480469}, {"image_id": 19, "category_id": 3, "bbox": [998.4468383789062, 428.6310119628906, 404.55303955078125, 581.1002502441406], "score": 0.04440160095691681}, {"image_id": 19, "category_id": 3, "bbox": [1106.8955078125, 339.40692138671875, 323.032470703125, 264.76470947265625], "score": 0.04419013112783432}, {"image_id": 19, "category_id": 3, "bbox": [914.8173217773438, 390.1963806152344, 
325.92730712890625, 270.8747863769531], "score": 0.04176688566803932}, {"image_id": 19, "category_id": 3, "bbox": [966.6874389648438, 441.8755798339844, 330.83966064453125, 279.9009094238281], "score": 0.04125269129872322}, {"image_id": 19, "category_id": 3, "bbox": [94.20378112792969, 18.9246883392334, 1131.5684356689453, 958.9851627349854], "score": 0.040623970329761505}, {"image_id": 19, "category_id": 3, "bbox": [960.3589477539062, 555.0917358398438, 326.29974365234375, 269.205078125], "score": 0.03992971032857895}, {"image_id": 19, "category_id": 3, "bbox": [959.7347412109375, 236.54559326171875, 326.98876953125, 260.560546875], "score": 0.03974037617444992}, {"image_id": 19, "category_id": 3, "bbox": [859.833251953125, 238.96865844726562, 329.5140380859375, 258.1942443847656], "score": 0.03957970812916756}, {"image_id": 19, "category_id": 3, "bbox": [1060.1590576171875, 197.92642211914062, 316.0316162109375, 277.5107727050781], "score": 0.03806448355317116}, {"image_id": 19, "category_id": 3, "bbox": [1109.9638671875, 232.12229919433594, 326.7099609375, 263.88279724121094], "score": 0.0369894914329052}]
\ No newline at end of file
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/coco_detection_main.yml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/coco_detection_main.yml
new file mode 100644
index 000000000..69af912b5
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/coco_detection_main.yml
@@ -0,0 +1,19 @@
+metric: COCO
+num_classes: 3
+
+TrainDataset:
+ !COCODataSet
+ image_dir: picodet_motorcycle/JPEGImages/
+ anno_path: voc_train.json
+ dataset_dir: /home/aistudio/data/data128282/
+ data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
+
+EvalDataset:
+ !COCODataSet
+ image_dir: picodet_motorcycle/JPEGImages/
+ anno_path: voc_test.json
+ dataset_dir: /home/aistudio/data/data128282/
+
+TestDataset:
+ !ImageFolder
+ anno_path: voc_test.json
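
As a quick sanity check before training, the two COCO-style annotation files referenced above can be loaded with pycocotools (already listed in this sample's requirements.txt). A minimal sketch; the path is assembled from the dataset_dir and anno_path values in TrainDataset and is an assumption about the runtime environment:

from pycocotools.coco import COCO

# Path taken from dataset_dir + anno_path in TrainDataset above (assumed environment).
coco = COCO('/home/aistudio/data/data128282/voc_train.json')
print(len(coco.getImgIds()), 'images')
# num_classes is 3 in this config, so three category names are expected here.
print([cat['name'] for cat in coco.loadCats(coco.getCatIds())])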
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/eval.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/eval.py
new file mode 100644
index 000000000..67f5383ff
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/eval.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+
+# add python path of PaddleDetection to sys.path
+parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
+sys.path.insert(0, parent_path)
+
+# ignore warning log
+import warnings
+warnings.filterwarnings('ignore')
+
+import paddle
+
+from ppdet.core.workspace import load_config, merge_config
+from ppdet.utils.check import check_gpu, check_npu, check_version, check_config
+from ppdet.utils.cli import ArgsParser
+from ppdet.engine import Trainer, init_parallel_env
+from ppdet.metrics.coco_utils import json_eval_results
+from ppdet.slim import build_slim_model
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger('eval')
+
+
+def parse_args():
+ parser = ArgsParser()
+ parser.add_argument(
+ "--output_eval",
+ default=None,
+ type=str,
+ help="Evaluation directory, default is current directory.")
+
+ parser.add_argument(
+ '--json_eval',
+ action='store_true',
+ default=False,
+ help='Whether to re-evaluate with an already existing bbox.json or mask.json')
+
+ parser.add_argument(
+ "--slim_config",
+ default=None,
+ type=str,
+ help="Configuration file of slim method.")
+
+ # TODO: bias should be unified
+ parser.add_argument(
+ "--bias",
+ action="store_true",
+ help="whether add bias or not while getting w and h")
+
+ parser.add_argument(
+ "--classwise",
+ action="store_true",
+ help="whether per-category AP and draw P-R Curve or not.")
+
+ parser.add_argument(
+ '--save_prediction_only',
+ action='store_true',
+ default=False,
+ help='Whether to save the evaluation results only')
+
+ args = parser.parse_args()
+ return args
+
+
+def run(FLAGS, cfg):
+ if FLAGS.json_eval:
+ logger.info(
+ "In json_eval mode, PaddleDetection will evaluate json files in "
+ "output_eval directly. And proposal.json, bbox.json and mask.json "
+ "will be detected by default.")
+ json_eval_results(
+ cfg.metric,
+ json_directory=FLAGS.output_eval,
+ dataset=cfg['EvalDataset'])
+ return
+
+ # init parallel environment if nranks > 1
+ init_parallel_env()
+
+ # build trainer
+ trainer = Trainer(cfg, mode='eval')
+
+ # load weights
+ trainer.load_weights(cfg.weights)
+
+ # run evaluation
+ trainer.evaluate()
+
+
+def main():
+ FLAGS = parse_args()
+ cfg = load_config(FLAGS.config)
+ # TODO: bias should be unified
+ cfg['bias'] = 1 if FLAGS.bias else 0
+ cfg['classwise'] = bool(FLAGS.classwise)
+ cfg['output_eval'] = FLAGS.output_eval
+ cfg['save_prediction_only'] = FLAGS.save_prediction_only
+ merge_config(FLAGS.opt)
+
+ # disable npu in config by default
+ if 'use_npu' not in cfg:
+ cfg.use_npu = False
+
+ if cfg.use_gpu:
+ place = paddle.set_device('gpu')
+ elif cfg.use_npu:
+ place = paddle.set_device('npu')
+ else:
+ place = paddle.set_device('cpu')
+
+ if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
+ cfg['norm_type'] = 'bn'
+
+ if FLAGS.slim_config:
+ cfg = build_slim_model(cfg, FLAGS.slim_config, mode='eval')
+ check_config(cfg)
+ check_gpu(cfg.use_gpu)
+ check_npu(cfg.use_npu)
+ check_version()
+
+ run(FLAGS, cfg)
+
+
+if __name__ == '__main__':
+ main()
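
For reference, a typical single-GPU invocation of this script using the config from this directory (the weights path is illustrative and assumes the training output layout below):

python eval.py -c picodet_lcnet_1_5x_416_coco.yml \
    -o weights=output/picodet_lcnet_1_5x_416_coco/best_model.pdparams --classwise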
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/export_model.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/export_model.py
new file mode 100644
index 000000000..deac2ea12
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/export_model.py
@@ -0,0 +1,115 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+
+# add python path of PaddleDetection to sys.path
+parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
+sys.path.insert(0, parent_path)
+
+# ignore warning log
+import warnings
+warnings.filterwarnings('ignore')
+
+import paddle
+
+from ppdet.core.workspace import load_config, merge_config
+from ppdet.utils.check import check_gpu, check_version, check_config
+from ppdet.utils.cli import ArgsParser
+from ppdet.engine import Trainer
+from ppdet.slim import build_slim_model
+
+from ppdet.utils.logger import setup_logger
+logger = setup_logger('export_model')
+
+
+def parse_args():
+ parser = ArgsParser()
+ parser.add_argument(
+ "--output_dir",
+ type=str,
+ default="output_inference",
+ help="Directory for storing the output model files.")
+ parser.add_argument(
+ "--export_serving_model",
+ type=bool,
+ default=False,
+ help="Whether to export serving model or not.")
+ parser.add_argument(
+ "--slim_config",
+ default=None,
+ type=str,
+ help="Configuration file of slim method.")
+ args = parser.parse_args()
+ return args
+
+
+def run(FLAGS, cfg):
+ # build detector
+ trainer = Trainer(cfg, mode='test')
+
+ # load weights
+ if cfg.architecture in ['DeepSORT']:
+ if cfg.det_weights != 'None':
+ trainer.load_weights_sde(cfg.det_weights, cfg.reid_weights)
+ else:
+ trainer.load_weights_sde(None, cfg.reid_weights)
+ else:
+ trainer.load_weights(cfg.weights)
+
+ # export model
+ trainer.export(FLAGS.output_dir)
+
+ if FLAGS.export_serving_model:
+ from paddle_serving_client.io import inference_model_to_serving
+ model_name = os.path.splitext(os.path.split(cfg.filename)[-1])[0]
+
+ inference_model_to_serving(
+ dirname="{}/{}".format(FLAGS.output_dir, model_name),
+ serving_server="{}/{}/serving_server".format(FLAGS.output_dir,
+ model_name),
+ serving_client="{}/{}/serving_client".format(FLAGS.output_dir,
+ model_name),
+ model_filename="model.pdmodel",
+ params_filename="model.pdiparams")
+
+
+def main():
+ paddle.set_device("cpu")
+ FLAGS = parse_args()
+ cfg = load_config(FLAGS.config)
+ # TODO: to be refined in the future
+ if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn':
+ FLAGS.opt['norm_type'] = 'bn'
+ merge_config(FLAGS.opt)
+
+ if FLAGS.slim_config:
+ cfg = build_slim_model(cfg, FLAGS.slim_config, mode='test')
+
+ # FIXME: Temporarily solve the priority problem of FLAGS.opt
+ merge_config(FLAGS.opt)
+ check_config(cfg)
+ check_gpu(cfg.use_gpu)
+ check_version()
+
+ run(FLAGS, cfg)
+
+
+if __name__ == '__main__':
+ main()
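
The exported model.pdmodel/model.pdiparams pair can then be loaded with Paddle's inference API. A minimal sketch, assuming the default --output_dir and the config name used in this sample:

from paddle.inference import Config, create_predictor

# Default export location: <output_dir>/<config file stem>/ (assumed paths)
infer_cfg = Config(
    'output_inference/picodet_lcnet_1_5x_416_coco/model.pdmodel',
    'output_inference/picodet_lcnet_1_5x_416_coco/model.pdiparams')
predictor = create_predictor(infer_cfg)
print(predictor.get_input_names())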
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/optimizer_300e.yml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/optimizer_300e.yml
new file mode 100644
index 000000000..5a89bbbce
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/optimizer_300e.yml
@@ -0,0 +1,18 @@
+epoch: 300
+
+LearningRate:
+ base_lr: 0.4
+ schedulers:
+ - !CosineDecay
+ max_epochs: 300
+ - !LinearWarmup
+ start_factor: 0.1
+ steps: 300
+
+OptimizerBuilder:
+ optimizer:
+ momentum: 0.9
+ type: Momentum
+ regularizer:
+ factor: 0.00004
+ type: L2
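
The schedule above combines a cosine decay over 300 epochs with a 300-step linear warmup starting at start_factor * base_lr. The same shape can be reproduced standalone with Paddle's schedulers; this is a sketch of the schedule, not the exact object PaddleDetection builds internally:

import paddle

base_lr = 0.4
cosine = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=base_lr, T_max=300)
sched = paddle.optimizer.lr.LinearWarmup(
    learning_rate=cosine, warmup_steps=300,
    start_lr=0.1 * base_lr, end_lr=base_lr)

model = paddle.nn.Linear(4, 4)  # placeholder model for illustration only
opt = paddle.optimizer.Momentum(
    learning_rate=sched, momentum=0.9,
    weight_decay=paddle.regularizer.L2Decay(4e-5),
    parameters=model.parameters())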
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/best_model.pdopt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/best_model.pdopt
new file mode 100644
index 000000000..603f65d2d
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/best_model.pdopt differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/best_model.pdparams b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/best_model.pdparams
new file mode 100644
index 000000000..0a266710c
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/best_model.pdparams differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/model_final.pdopt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/model_final.pdopt
new file mode 100644
index 000000000..603f65d2d
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/model_final.pdopt differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/model_final.pdparams b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/model_final.pdparams
new file mode 100644
index 000000000..0a266710c
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output/picodet_lcnet_1_5x_416_coco/model_final.pdparams differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/infer_cfg.yml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/infer_cfg.yml
new file mode 100644
index 000000000..e29f9298f
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/infer_cfg.yml
@@ -0,0 +1,118 @@
+mode: fluid
+draw_threshold: 0.5
+metric: COCO
+use_dynamic_shape: false
+arch: PicoDet
+min_subgraph_size: 3
+Preprocess:
+- interp: 2
+ keep_ratio: false
+ target_size:
+ - 640
+ - 640
+ type: Resize
+- is_scale: true
+ mean:
+ - 0.485
+ - 0.456
+ - 0.406
+ std:
+ - 0.229
+ - 0.224
+ - 0.225
+ type: NormalizeImage
+- type: Permute
+- stride: 32
+ type: PadStride
+label_list:
+- person
+- bicycle
+- car
+- motorcycle
+- airplane
+- bus
+- train
+- truck
+- boat
+- traffic light
+- fire hydrant
+- stop sign
+- parking meter
+- bench
+- bird
+- cat
+- dog
+- horse
+- sheep
+- cow
+- elephant
+- bear
+- zebra
+- giraffe
+- backpack
+- umbrella
+- handbag
+- tie
+- suitcase
+- frisbee
+- skis
+- snowboard
+- sports ball
+- kite
+- baseball bat
+- baseball glove
+- skateboard
+- surfboard
+- tennis racket
+- bottle
+- wine glass
+- cup
+- fork
+- knife
+- spoon
+- bowl
+- banana
+- apple
+- sandwich
+- orange
+- broccoli
+- carrot
+- hot dog
+- pizza
+- donut
+- cake
+- chair
+- couch
+- potted plant
+- bed
+- dining table
+- toilet
+- tv
+- laptop
+- mouse
+- remote
+- keyboard
+- cell phone
+- microwave
+- oven
+- toaster
+- sink
+- refrigerator
+- book
+- clock
+- vase
+- scissors
+- teddy bear
+- hair drier
+- toothbrush
+NMS:
+ keep_top_k: 100
+ name: MultiClassNMS
+ nms_threshold: 0.5
+ nms_top_k: 1000
+ score_threshold: 0.3
+fpn_stride:
+- 8
+- 16
+- 32
+- 64
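
The Preprocess list above translates to a few lines of OpenCV/NumPy on the deploy side; interp: 2 corresponds to cv2.INTER_CUBIC, and Permute converts HWC to CHW. A hedged sketch of the equivalent steps:

import cv2
import numpy as np

def preprocess(path):
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    # Resize: keep_ratio false, target_size 640x640, interp 2 (cubic)
    img = cv2.resize(img, (640, 640), interpolation=cv2.INTER_CUBIC)
    img = img.astype('float32') / 255.0  # NormalizeImage: is_scale true
    img = (img - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
    img = img.transpose((2, 0, 1))  # Permute: HWC -> CHW
    # PadStride: 640 is already a multiple of 32, so no extra padding is needed here.
    return img[np.newaxis].astype('float32')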
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/model.pdiparams b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/model.pdiparams
new file mode 100644
index 000000000..b7edffd8a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/model.pdiparams differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/model.pdiparams.info b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/model.pdiparams.info
new file mode 100644
index 000000000..9c292e824
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/model.pdiparams.info differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/model.pdmodel b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/model.pdmodel
new file mode 100644
index 000000000..fb75636f0
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/model.pdmodel differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/serving_server/model.pdmodel b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/output_inference/picodet_lcnet_1_5x_416_coco/serving_server/model.pdmodel
new file mode 100644
index 000000000..e69de29bb
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/picodet_640_reader.yml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/picodet_640_reader.yml
new file mode 100644
index 000000000..a931f2a76
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/picodet_640_reader.yml
@@ -0,0 +1,41 @@
+worker_num: 6
+TrainReader:
+ sample_transforms:
+ - Decode: {}
+ - RandomCrop: {}
+ - RandomFlip: {prob: 0.5}
+ - RandomDistort: {}
+ batch_transforms:
+ - BatchRandomResize: {target_size: [576, 608, 640, 672, 704], random_size: True, random_interp: True, keep_ratio: False}
+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
+ - Permute: {}
+ batch_size: 56
+ shuffle: true
+ drop_last: true
+ collate_batch: false
+
+
+EvalReader:
+ sample_transforms:
+ - Decode: {}
+ - Resize: {interp: 2, target_size: [640, 640], keep_ratio: False}
+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
+ - Permute: {}
+ batch_transforms:
+ - PadBatch: {pad_to_stride: 32}
+ batch_size: 8
+ shuffle: false
+
+
+TestReader:
+ inputs_def:
+ image_shape: [1, 3, 640, 640]
+ sample_transforms:
+ - Decode: {}
+ - Resize: {interp: 2, target_size: [640, 640], keep_ratio: False}
+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
+ - Permute: {}
+ batch_transforms:
+ - PadBatch: {pad_to_stride: 32}
+ batch_size: 1
+ shuffle: false
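
BatchRandomResize in the TrainReader implements multi-scale training: one size is drawn from target_size per batch, so every image in a batch shares the same shape. A toy sketch of the idea (not PaddleDetection's actual operator):

import random
import cv2

def batch_random_resize(images, sizes=(576, 608, 640, 672, 704)):
    size = random.choice(sizes)  # one scale per batch
    interp = random.choice(
        [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC])  # random_interp
    return [cv2.resize(im, (size, size), interpolation=interp) for im in images]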
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/picodet_esnet.yml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/picodet_esnet.yml
new file mode 100644
index 000000000..aa099fca1
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/picodet_esnet.yml
@@ -0,0 +1,55 @@
+architecture: PicoDet
+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ESNet_x1_0_pretrained.pdparams
+
+PicoDet:
+ backbone: ESNet
+ neck: CSPPAN
+ head: PicoHead
+
+ESNet:
+ scale: 1.0
+ feature_maps: [4, 11, 14]
+ act: hard_swish
+ channel_ratio: [0.875, 0.5, 1.0, 0.625, 0.5, 0.75, 0.625, 0.625, 0.5, 0.625, 1.0, 0.625, 0.75]
+
+CSPPAN:
+ out_channels: 128
+ use_depthwise: True
+ num_csp_blocks: 1
+ num_features: 4
+
+PicoHead:
+ conv_feat:
+ name: PicoFeat
+ feat_in: 128
+ feat_out: 128
+ num_convs: 4
+ num_fpn_stride: 4
+ norm_type: bn
+ share_cls_reg: True
+ fpn_stride: [8, 16, 32, 64]
+ feat_in_chan: 128
+ prior_prob: 0.01
+ reg_max: 7
+ cell_offset: 0.5
+ loss_class:
+ name: VarifocalLoss
+ use_sigmoid: True
+ iou_weighted: True
+ loss_weight: 1.0
+ loss_dfl:
+ name: DistributionFocalLoss
+ loss_weight: 0.25
+ loss_bbox:
+ name: GIoULoss
+ loss_weight: 2.0
+ assigner:
+ name: SimOTAAssigner
+ candidate_topk: 10
+ iou_weight: 6
+ nms:
+ name: MultiClassNMS
+ nms_top_k: 1000
+ keep_top_k: 100
+ score_threshold: 0.025
+ nms_threshold: 0.6
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/picodet_lcnet_1_5x_416_coco.yml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/picodet_lcnet_1_5x_416_coco.yml
new file mode 100644
index 000000000..302137468
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/picodet_lcnet_1_5x_416_coco.yml
@@ -0,0 +1,36 @@
+_BASE_: [
+ './coco_detection_main.yml',
+ './runtime.yml',
+ './picodet_esnet.yml',
+ './optimizer_300e.yml',
+ './picodet_640_reader.yml',
+]
+
+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/LCNet_x1_5_pretrained.pdparams
+weights: output/picodet_lcnet_1_5x_416_coco/best_model
+find_unused_parameters: True
+use_ema: true
+cycle_epoch: 40
+snapshot_epoch: 10
+epoch: 10
+
+PicoDet:
+ backbone: LCNet
+ neck: CSPPAN
+ head: PicoHead
+
+LCNet:
+ scale: 1.5
+ feature_maps: [3, 4, 5]
+
+TrainReader:
+ batch_size: 20
+
+LearningRate:
+ base_lr: 0.1
+ schedulers:
+ - !CosineDecay
+ max_epochs: 300
+ - !LinearWarmup
+ start_factor: 0.1
+ steps: 300
\ No newline at end of file
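
The _BASE_ list is resolved by PaddleDetection's config loader: the base files are merged in order and keys in this file win, so base_lr effectively drops from 0.4 to 0.1, TrainReader.batch_size from 56 to 20, and epoch from 300 to 10. One way to inspect the merged result (run from this train/ directory):

from ppdet.core.workspace import load_config

cfg = load_config('picodet_lcnet_1_5x_416_coco.yml')
print(cfg['epoch'], cfg['LearningRate']['base_lr'], cfg['TrainReader']['batch_size'])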
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/requirements.txt b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/requirements.txt
new file mode 100644
index 000000000..8b184e905
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/requirements.txt
@@ -0,0 +1,17 @@
+tqdm
+typeguard ; python_version >= '3.4'
+visualdl>=2.1.0 ; python_version <= '3.7'
+opencv-python
+PyYAML
+shapely
+scipy
+terminaltables
+Cython
+pycocotools
+#xtcocotools==1.6 #only for crowdpose
+setuptools>=42.0.0
+lap
+scikit-learn
+motmetrics
+openpyxl
+cython_bbox
\ No newline at end of file
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/runtime.yml b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/runtime.yml
new file mode 100644
index 000000000..c502ddabe
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/runtime.yml
@@ -0,0 +1,5 @@
+use_gpu: true
+log_iter: 20
+save_dir: output
+snapshot_epoch: 1
+print_flops: false
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/train.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/train.py
new file mode 100644
index 000000000..878aa60fa
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/train/train.py
@@ -0,0 +1,171 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+
+# add python path of PaddleDetection to sys.path
+parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
+sys.path.insert(0, parent_path)
+
+# ignore warning log
+import warnings
+warnings.filterwarnings('ignore')
+
+import paddle
+
+from ppdet.core.workspace import load_config, merge_config
+from ppdet.engine import Trainer, init_parallel_env, set_random_seed, init_fleet_env
+from ppdet.slim import build_slim_model
+
+import ppdet.utils.cli as cli
+import ppdet.utils.check as check
+from ppdet.utils.logger import setup_logger
+logger = setup_logger('train')
+
+
+def parse_args():
+ parser = cli.ArgsParser()
+ parser.add_argument(
+ "--eval",
+ action='store_true',
+ default=False,
+ help="Whether to perform evaluation in train")
+ parser.add_argument(
+ "-r", "--resume", default=None, help="weights path for resume")
+ parser.add_argument(
+ "--slim_config",
+ default=None,
+ type=str,
+ help="Configuration file of slim method.")
+ parser.add_argument(
+ "--enable_ce",
+ type=bool,
+ default=False,
+ help="If set True, enable continuous evaluation job."
+ "This flag is only used for internal test.")
+ parser.add_argument(
+ "--fp16",
+ action='store_true',
+ default=False,
+ help="Enable mixed precision training.")
+ parser.add_argument(
+ "--fleet", action='store_true', default=False, help="Use fleet or not")
+ parser.add_argument(
+ "--use_vdl",
+ type=bool,
+ default=False,
+ help="whether to record the data to VisualDL.")
+ parser.add_argument(
+ '--vdl_log_dir',
+ type=str,
+ default="vdl_log_dir/scalar",
+ help='VisualDL logging directory for scalar.')
+ parser.add_argument(
+ '--save_prediction_only',
+ action='store_true',
+ default=False,
+ help='Whether to save the evaluation results only')
+ parser.add_argument(
+ '--profiler_options',
+ type=str,
+ default=None,
+ help="The option of profiler, which should be in "
+ "format \"key1=value1;key2=value2;key3=value3\"."
+ "please see ppdet/utils/profiler.py for detail.")
+ parser.add_argument(
+ '--save_proposals',
+ action='store_true',
+ default=False,
+ help='Whether to save the train proposals')
+ parser.add_argument(
+ '--proposals_path',
+ type=str,
+ default="sniper/proposals.json",
+ help='Path of the json file for saving train proposals')
+
+ args = parser.parse_args()
+ return args
+
+
+def run(FLAGS, cfg):
+ # init fleet environment
+ if cfg.fleet:
+ init_fleet_env(cfg.get('find_unused_parameters', False))
+ else:
+ # init parallel environment if nranks > 1
+ init_parallel_env()
+
+ if FLAGS.enable_ce:
+ set_random_seed(0)
+
+ # build trainer
+ trainer = Trainer(cfg, mode='train')
+
+ # load weights
+ if FLAGS.resume is not None:
+ trainer.resume_weights(FLAGS.resume)
+ elif 'pretrain_weights' in cfg and cfg.pretrain_weights:
+ trainer.load_weights(cfg.pretrain_weights)
+
+ # training
+ trainer.train(FLAGS.eval)
+
+
+def main():
+ FLAGS = parse_args()
+ cfg = load_config(FLAGS.config)
+ cfg['fp16'] = FLAGS.fp16
+ cfg['fleet'] = FLAGS.fleet
+ cfg['use_vdl'] = FLAGS.use_vdl
+ cfg['vdl_log_dir'] = FLAGS.vdl_log_dir
+ cfg['save_prediction_only'] = FLAGS.save_prediction_only
+ cfg['profiler_options'] = FLAGS.profiler_options
+ cfg['save_proposals'] = FLAGS.save_proposals
+ cfg['proposals_path'] = FLAGS.proposals_path
+ merge_config(FLAGS.opt)
+
+ # disable npu in config by default
+ if 'use_npu' not in cfg:
+ cfg.use_npu = False
+
+ if cfg.use_gpu:
+ place = paddle.set_device('gpu')
+ elif cfg.use_npu:
+ place = paddle.set_device('npu')
+ else:
+ place = paddle.set_device('cpu')
+
+ if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:
+ cfg['norm_type'] = 'bn'
+
+ if FLAGS.slim_config:
+ cfg = build_slim_model(cfg, FLAGS.slim_config)
+
+ # FIXME: Temporarily solve the priority problem of FLAGS.opt
+ merge_config(FLAGS.opt)
+ check.check_config(cfg)
+ check.check_gpu(cfg.use_gpu)
+ check.check_npu(cfg.use_npu)
+ check.check_version()
+
+ run(FLAGS, cfg)
+
+
+if __name__ == "__main__":
+ main()
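
A typical launch of this script against the configs above, with in-training evaluation and VisualDL logging enabled (single machine; the flags are the ones defined in parse_args):

python train.py -c picodet_lcnet_1_5x_416_coco.yml --eval \
    --use_vdl=True --vdl_log_dir=vdl_log_dir/scalar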
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/config.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/config.cpython-37.pyc
new file mode 100644
index 000000000..34c5168eb
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/config.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/get_image_list.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/get_image_list.cpython-37.pyc
new file mode 100644
index 000000000..c2ca48112
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/get_image_list.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/logger.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/logger.cpython-37.pyc
new file mode 100644
index 000000000..fdde1c9da
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/logger.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/predictor.cpython-37.pyc b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/predictor.cpython-37.pyc
new file mode 100644
index 000000000..a54b304db
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/__pycache__/predictor.cpython-37.pyc differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/config.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/config.py
new file mode 100644
index 000000000..eb7914806
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/config.py
@@ -0,0 +1,197 @@
+# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import copy
+import argparse
+import yaml
+
+from utils import logger
+
+__all__ = ['get_config']
+
+
+class AttrDict(dict):
+ def __getattr__(self, key):
+ return self[key]
+
+ def __setattr__(self, key, value):
+ if key in self.__dict__:
+ self.__dict__[key] = value
+ else:
+ self[key] = value
+
+ def __deepcopy__(self, content):
+ return copy.deepcopy(dict(self))
+
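+# A minimal usage sketch of AttrDict (illustrative values): keys become
+# reachable through attribute access while remaining plain dict entries.
+#
+#   cfg = AttrDict({'use_gpu': True})
+#   assert cfg.use_gpu is True      # __getattr__ falls back to the dict
+#   cfg.epochs = 10                 # __setattr__ stores into the dict
+#   assert cfg['epochs'] == 10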
+
+def create_attr_dict(yaml_config):
+ from ast import literal_eval
+ for key, value in yaml_config.items():
+ if type(value) is dict:
+ yaml_config[key] = value = AttrDict(value)
+ if isinstance(value, str):
+ try:
+ value = literal_eval(value)
+ except BaseException:
+ pass
+ if isinstance(value, AttrDict):
+ create_attr_dict(yaml_config[key])
+ else:
+ yaml_config[key] = value
+
+
+def parse_config(cfg_file):
+ """Load a config file into AttrDict"""
+ with open(cfg_file, 'r') as fopen:
+ yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.SafeLoader))
+ create_attr_dict(yaml_config)
+ return yaml_config
+
+
+def print_dict(d, delimiter=0):
+ """
+ Recursively print a dict, indenting according to
+ the nesting level of the keys.
+ """
+ placeholder = "-" * 60
+ for k, v in sorted(d.items()):
+ if isinstance(v, dict):
+ logger.info("{}{} : ".format(delimiter * " ",
+ logger.coloring(k, "HEADER")))
+ print_dict(v, delimiter + 4)
+ elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict):
+ logger.info("{}{} : ".format(delimiter * " ",
+ logger.coloring(str(k), "HEADER")))
+ for value in v:
+ print_dict(value, delimiter + 4)
+ else:
+ logger.info("{}{} : {}".format(delimiter * " ",
+ logger.coloring(k, "HEADER"),
+ logger.coloring(v, "OKGREEN")))
+ if k.isupper():
+ logger.info(placeholder)
+
+
+def print_config(config):
+ """
+ Print the configs.
+ Arguments:
+ config: config dict to be printed
+ """
+ logger.advertise()
+ print_dict(config)
+
+
+def override(dl, ks, v):
+ """
+ Recursively replace a value in a nested dict or list.
+ Args:
+ dl(dict or list): dict or list to be replaced
+ ks(list): list of keys
+ v(str): value to be replaced
+ """
+
+ def str2num(v):
+ try:
+ return eval(v)
+ except Exception:
+ return v
+
+ assert isinstance(dl, (list, dict)), (
+ "{} should be a list or a dict".format(dl))
+ assert len(ks) > 0, ('length of keys should be larger than 0')
+ if isinstance(dl, list):
+ k = str2num(ks[0])
+ if len(ks) == 1:
+ assert k < len(dl), ('index({}) out of range({})'.format(k, dl))
+ dl[k] = str2num(v)
+ else:
+ override(dl[k], ks[1:], v)
+ else:
+ if len(ks) == 1:
+ # assert ks[0] in dl, ('{} is not exist in {}'.format(ks[0], dl))
+ if ks[0] not in dl:
+ logger.warning('A new field ({}) detected!'.format(ks[0]))
+ dl[ks[0]] = str2num(v)
+ else:
+ override(dl[ks[0]], ks[1:], v)
+
+
+def override_config(config, options=None):
+ """
+ Recursively override the config
+ Args:
+ config(dict): dict to be replaced
+ options(list): list of pairs(key0.key1.idx.key2=value)
+ such as: [
+ 'topk=2',
+ 'VALID.transforms.1.ResizeImage.resize_short=300'
+ ]
+ Returns:
+ config(dict): replaced config
+ """
+ if options is not None:
+ for opt in options:
+ assert isinstance(opt, str), (
+ "option({}) should be a str".format(opt))
+ assert "=" in opt, (
+ "option({}) should contain a ="
+ "to distinguish between key and value".format(opt))
+ pair = opt.split('=')
+ assert len(pair) == 2, ("there can be only a = in the option")
+ key, value = pair
+ keys = key.split('.')
+ override(config, keys, value)
+ return config
+
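+# Illustrative example (keys are hypothetical): each "key0.key1=value" option
+# is split on '=' and walked down the nested dict by override(), so
+#
+#   cfg = AttrDict({'Optimizer': AttrDict({'lr': 0.1}), 'topk': 1})
+#   override_config(cfg, ['Optimizer.lr=0.01', 'topk=5'])
+#
+# leaves cfg['Optimizer']['lr'] == 0.01 and cfg['topk'] == 5.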
+
+def get_config(fname, overrides=None, show=True):
+ """
+ Read config from file
+ """
+ assert os.path.exists(fname), (
+ 'config file ({}) does not exist'.format(fname))
+ config = parse_config(fname)
+ override_config(config, overrides)
+ if show:
+ print_config(config)
+ # check_config(config)
+ return config
+
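+# Typical call (the path and override key are illustrative):
+#
+#   config = get_config('configs/config.yaml',
+#                       overrides=['Global.epochs=20'], show=False)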
+
+def parser():
+ parser = argparse.ArgumentParser("generic-image-rec train script")
+ parser.add_argument(
+ '-c',
+ '--config',
+ type=str,
+ default='configs/config.yaml',
+ help='config file path')
+ parser.add_argument(
+ '-o',
+ '--override',
+ action='append',
+ default=[],
+ help='config options to be overridden')
+ parser.add_argument(
+ '-v',
+ '--verbose',
+ action='store_true',
+ help='whether to print the config info')
+ return parser
+
+
+def parse_args():
+ args = parser().parse_args()
+ return args
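+
+# Command-line sketch matching the flags defined above (the script name is a
+# placeholder; option values are examples):
+#
+#   python your_train_script.py -c configs/config.yaml -o Global.epochs=20 -v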
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/get_image_list.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/get_image_list.py
new file mode 100644
index 000000000..6f10935ad
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/get_image_list.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import argparse
+import base64
+import numpy as np
+
+
+def get_image_list(img_file):
+ imgs_lists = []
+ if img_file is None or not os.path.exists(img_file):
+ raise Exception("not found any img file in {}".format(img_file))
+
+ img_end = ['jpg', 'png', 'jpeg', 'JPEG', 'JPG', 'bmp']
+ if os.path.isfile(img_file) and img_file.split('.')[-1] in img_end:
+ imgs_lists.append(img_file)
+ elif os.path.isdir(img_file):
+ for single_file in os.listdir(img_file):
+ if single_file.split('.')[-1] in img_end:
+ imgs_lists.append(os.path.join(img_file, single_file))
+ if len(imgs_lists) == 0:
+ raise Exception("not found any img file in {}".format(img_file))
+ imgs_lists = sorted(imgs_lists)
+ return imgs_lists
+
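+# Usage sketch (the path is hypothetical): accepts either a single image file
+# or a directory and returns a sorted list of image paths, e.g.
+#
+#   images = get_image_list('demo_dir/')   # ['demo_dir/a.jpg', 'demo_dir/b.png']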
+
+def get_image_list_from_label_file(image_path, label_file_path):
+ imgs_lists = []
+ gt_labels = []
+ with open(label_file_path, "r") as fin:
+ lines = fin.readlines()
+ for line in lines:
+ image_name, label = line.strip("\n").split()
+ label = int(label)
+ imgs_lists.append(os.path.join(image_path, image_name))
+ gt_labels.append(label)
+ return imgs_lists, gt_labels
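+
+# The label file is expected to hold one "image_name label" pair per line,
+# for example (contents are illustrative):
+#
+#   cat_001.jpg 0
+#   dog_001.jpg 1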
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/logger.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/logger.py
new file mode 100644
index 000000000..ece852624
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/logger.py
@@ -0,0 +1,120 @@
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import datetime
+
+logging.basicConfig(
+ level=logging.INFO,
+ format="%(asctime)s %(levelname)s: %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S")
+
+
+def time_zone(sec, fmt):
+ real_time = datetime.datetime.now()
+ return real_time.timetuple()
+
+
+logging.Formatter.converter = time_zone
+_logger = logging.getLogger(__name__)
+
+Color = {
+ 'RED': '\033[31m',
+ 'HEADER': '\033[35m', # deep purple
+ 'PURPLE': '\033[95m', # purple
+ 'OKBLUE': '\033[94m',
+ 'OKGREEN': '\033[92m',
+ 'WARNING': '\033[93m',
+ 'FAIL': '\033[91m',
+ 'ENDC': '\033[0m'
+}
+
+
+def coloring(message, color="OKGREEN"):
+ assert color in Color.keys()
+ if os.environ.get('PADDLECLAS_COLORING', False):
+ return Color[color] + str(message) + Color["ENDC"]
+ else:
+ return message
+
+
+def anti_fleet(log):
+ """
+ Logs print multiple times when calling Fleet APIs.
+ Only display the log of trainer 0 and ignore the others.
+ """
+
+ def wrapper(fmt, *args):
+ if int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
+ log(fmt, *args)
+
+ return wrapper
+
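+# Sketch of the decorator's effect (the environment value is illustrative):
+# with multiple Fleet trainers, only the process whose PADDLE_TRAINER_ID is 0
+# actually writes the record.
+#
+#   os.environ['PADDLE_TRAINER_ID'] = '1'
+#   info('hello')   # suppressed on this non-zero trainer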
+
+@anti_fleet
+def info(fmt, *args):
+ _logger.info(fmt, *args)
+
+
+@anti_fleet
+def warning(fmt, *args):
+ _logger.warning(coloring(fmt, "RED"), *args)
+
+
+@anti_fleet
+def error(fmt, *args):
+ _logger.error(coloring(fmt, "FAIL"), *args)
+
+
+def scaler(name, value, step, writer):
+ """
+ This function will draw a scalar curve generated by VisualDL.
+ Usage: install VisualDL first: pip3 install visualdl==2.0.0b4
+ and then:
+ visualdl --logdir ./scalar --host 0.0.0.0 --port 8830
+ to preview the loss curve in real time.
+ """
+ writer.add_scalar(tag=name, step=step, value=value)
+
+
+def advertise():
+ """
+ Show the advertising message like the following:
+
+ ===========================================================
+ == PaddleClas is powered by PaddlePaddle ! ==
+ ===========================================================
+ == ==
+ == For more info please go to the following website. ==
+ == ==
+ == https://github.com/PaddlePaddle/PaddleClas ==
+ ===========================================================
+
+ """
+ copyright = "PaddleClas is powered by PaddlePaddle !"
+ ad = "For more info please go to the following website."
+ website = "https://github.com/PaddlePaddle/PaddleClas"
+ AD_LEN = 6 + len(max([copyright, ad, website], key=len))
+
+ info(
+ coloring("\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n".format(
+ "=" * (AD_LEN + 4),
+ "=={}==".format(copyright.center(AD_LEN)),
+ "=" * (AD_LEN + 4),
+ "=={}==".format(' ' * AD_LEN),
+ "=={}==".format(ad.center(AD_LEN)),
+ "=={}==".format(' ' * AD_LEN),
+ "=={}==".format(website.center(AD_LEN)),
+ "=" * (AD_LEN + 4), ), "RED"))
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/predictor.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/predictor.py
new file mode 100644
index 000000000..11f153071
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/utils/predictor.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import argparse
+import base64
+import shutil
+import cv2
+import numpy as np
+
+from paddle.inference import Config
+from paddle.inference import create_predictor
+
+
+class Predictor(object):
+ def __init__(self, args, inference_model_dir=None):
+ # Half-precision (FP16) prediction only works when using TensorRT
+ if args.use_fp16 is True:
+ assert args.use_tensorrt is True
+ self.args = args
+ self.paddle_predictor, self.config = self.create_paddle_predictor(
+ args, inference_model_dir)
+
+ def predict(self, image):
+ raise NotImplementedError
+
+ def create_paddle_predictor(self, args, inference_model_dir=None):
+ if inference_model_dir is None:
+ inference_model_dir = args.inference_model_dir
+ params_file = os.path.join(inference_model_dir, "inference.pdiparams")
+ model_file = os.path.join(inference_model_dir, "inference.pdmodel")
+ config = Config(model_file, params_file)
+
+ if args.use_gpu:
+ config.enable_use_gpu(args.gpu_mem, 0)
+ else:
+ config.disable_gpu()
+ if args.enable_mkldnn:
+ # cache 10 different shapes for mkldnn to avoid memory leak
+ config.set_mkldnn_cache_capacity(10)
+ config.enable_mkldnn()
+ config.set_cpu_math_library_num_threads(args.cpu_num_threads)
+
+ if args.enable_profile:
+ config.enable_profile()
+ config.disable_glog_info()
+ config.switch_ir_optim(args.ir_optim) # default true
+ if args.use_tensorrt:
+ config.enable_tensorrt_engine(
+ precision_mode=Config.Precision.Half
+ if args.use_fp16 else Config.Precision.Float32,
+ max_batch_size=args.batch_size,
+ min_subgraph_size=30)
+
+ config.enable_memory_optim()
+ # use zero copy
+ config.switch_use_feed_fetch_ops(False)
+ predictor = create_predictor(config)
+
+ return predictor, config
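+
+# Minimal usage sketch; the Namespace fields mirror the attributes this class
+# reads from `args`, and the values are hypothetical:
+#
+#   args = argparse.Namespace(
+#       use_gpu=False, gpu_mem=2000, enable_mkldnn=False, cpu_num_threads=4,
+#       enable_profile=False, ir_optim=True, use_tensorrt=False,
+#       use_fp16=False, batch_size=1, inference_model_dir='inference/')
+#   predictor = Predictor(args)
+#   input_names = predictor.paddle_predictor.get_input_names()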
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/x2coco.py b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/x2coco.py
new file mode 100644
index 000000000..2d0e64e64
--- /dev/null
+++ b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/x2coco.py
@@ -0,0 +1,450 @@
+#!/usr/bin/env python
+# coding: utf-8
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import glob
+import json
+import os
+import os.path as osp
+import shutil
+import xml.etree.ElementTree as ET
+from tqdm import tqdm
+
+import numpy as np
+import PIL.Image
+import PIL.ImageDraw
+
+label_to_num = {}
+categories_list = []
+labels_list = []
+
+
+class MyEncoder(json.JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, np.integer):
+ return int(obj)
+ elif isinstance(obj, np.floating):
+ return float(obj)
+ elif isinstance(obj, np.ndarray):
+ return obj.tolist()
+ else:
+ return super(MyEncoder, self).default(obj)
+
+
+def images_labelme(data, num):
+ image = {}
+ image['height'] = data['imageHeight']
+ image['width'] = data['imageWidth']
+ image['id'] = num + 1
+ if '\\' in data['imagePath']:
+ image['file_name'] = data['imagePath'].split('\\')[-1]
+ else:
+ image['file_name'] = data['imagePath'].split('/')[-1]
+ image['file_name'] = image['file_name'].rstrip()
+ return image
+
+
+def images_cityscape(data, num, img_file):
+ image = {}
+ image['height'] = data['imgHeight']
+ image['width'] = data['imgWidth']
+ image['id'] = num + 1
+ image['file_name'] = img_file
+ image['file_name'] = image['file_name'].rstrip()
+ return image
+
+
+def categories(label, labels_list):
+ category = {}
+ category['supercategory'] = 'component'
+ category['id'] = len(labels_list) + 1
+ category['name'] = label
+ return category
+
+
+def annotations_rectangle(points, label, image_num, object_num, label_to_num):
+ annotation = {}
+ seg_points = np.asarray(points).copy()
+ seg_points[1, :] = np.asarray(points)[2, :]
+ seg_points[2, :] = np.asarray(points)[1, :]
+ annotation['segmentation'] = [list(seg_points.flatten())]
+ annotation['iscrowd'] = 0
+ annotation['image_id'] = image_num + 1
+ annotation['bbox'] = list(
+ map(float, [
+ points[0][0], points[0][1], points[1][0] - points[0][0], points[1][
+ 1] - points[0][1]
+ ]))
+ annotation['area'] = annotation['bbox'][2] * annotation['bbox'][3]
+ annotation['category_id'] = label_to_num[label]
+ annotation['id'] = object_num + 1
+ return annotation
+
+
+def annotations_polygon(height, width, points, label, image_num, object_num,
+ label_to_num):
+ annotation = {}
+ annotation['segmentation'] = [list(np.asarray(points).flatten())]
+ annotation['iscrowd'] = 0
+ annotation['image_id'] = image_num + 1
+ annotation['bbox'] = list(map(float, get_bbox(height, width, points)))
+ annotation['area'] = annotation['bbox'][2] * annotation['bbox'][3]
+ annotation['category_id'] = label_to_num[label]
+ annotation['id'] = object_num + 1
+ return annotation
+
+
+def get_bbox(height, width, points):
+ polygons = points
+ mask = np.zeros([height, width], dtype=np.uint8)
+ mask = PIL.Image.fromarray(mask)
+ xy = list(map(tuple, polygons))
+ PIL.ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
+ mask = np.array(mask, dtype=bool)
+ index = np.argwhere(mask == 1)
+ rows = index[:, 0]
+ cols = index[:, 1]
+ left_top_r = np.min(rows)
+ left_top_c = np.min(cols)
+ right_bottom_r = np.max(rows)
+ right_bottom_c = np.max(cols)
+ return [
+ left_top_c, left_top_r, right_bottom_c - left_top_c,
+ right_bottom_r - left_top_r
+ ]
+
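+# For example, on a 10x10 mask the square polygon
+# [[2, 2], [6, 2], [6, 6], [2, 6]] rasterizes to rows/cols 2..6, so get_bbox
+# returns [2, 2, 4, 4] as (x, y, width, height).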
+
+def deal_json(ds_type, img_path, json_path):
+ data_coco = {}
+ images_list = []
+ annotations_list = []
+ image_num = -1
+ object_num = -1
+ for img_file in os.listdir(img_path):
+ img_label = os.path.splitext(img_file)[0]
+ if img_file.split('.')[-1] not in [
+ 'bmp', 'jpg', 'jpeg', 'png', 'JPEG', 'JPG', 'PNG'
+ ]:
+ continue
+ label_file = osp.join(json_path, img_label + '.json')
+ print('Generating dataset from:', label_file)
+ image_num = image_num + 1
+ with open(label_file) as f:
+ data = json.load(f)
+ if ds_type == 'labelme':
+ images_list.append(images_labelme(data, image_num))
+ elif ds_type == 'cityscape':
+ images_list.append(images_cityscape(data, image_num, img_file))
+ if ds_type == 'labelme':
+ for shapes in data['shapes']:
+ object_num = object_num + 1
+ label = shapes['label']
+ if label not in labels_list:
+ categories_list.append(categories(label, labels_list))
+ labels_list.append(label)
+ label_to_num[label] = len(labels_list)
+ p_type = shapes['shape_type']
+ if p_type == 'polygon':
+ points = shapes['points']
+ annotations_list.append(
+ annotations_polygon(data['imageHeight'], data[
+ 'imageWidth'], points, label, image_num,
+ object_num, label_to_num))
+
+ if p_type == 'rectangle':
+ (x1, y1), (x2, y2) = shapes['points']
+ x1, x2 = sorted([x1, x2])
+ y1, y2 = sorted([y1, y2])
+ points = [[x1, y1], [x2, y2], [x1, y2], [x2, y1]]
+ annotations_list.append(
+ annotations_rectangle(points, label, image_num,
+ object_num, label_to_num))
+ elif ds_type == 'cityscape':
+ for shapes in data['objects']:
+ object_num = object_num + 1
+ label = shapes['label']
+ if label not in labels_list:
+ categories_list.append(categories(label, labels_list))
+ labels_list.append(label)
+ label_to_num[label] = len(labels_list)
+ points = shapes['polygon']
+ annotations_list.append(
+ annotations_polygon(data['imgHeight'], data[
+ 'imgWidth'], points, label, image_num, object_num,
+ label_to_num))
+ data_coco['images'] = images_list
+ data_coco['categories'] = categories_list
+ data_coco['annotations'] = annotations_list
+ return data_coco
+
+
+def voc_get_label_anno(ann_dir_path, ann_ids_path, labels_path):
+ with open(labels_path, 'r') as f:
+ labels_str = f.read().split()
+ labels_ids = list(range(1, len(labels_str) + 1))
+
+ with open(ann_ids_path, 'r') as f:
+ ann_ids = [lin.strip().split(' ')[-1] for lin in f.readlines()]
+
+ ann_paths = []
+ for aid in ann_ids:
+ if aid.endswith('xml'):
+ ann_path = os.path.join(ann_dir_path, aid)
+ else:
+ ann_path = os.path.join(ann_dir_path, aid + '.xml')
+ ann_paths.append(ann_path)
+
+ return dict(zip(labels_str, labels_ids)), ann_paths
+
+
+def voc_get_image_info(annotation_root, im_id):
+ filename = annotation_root.findtext('filename')
+ assert filename is not None
+ img_name = os.path.basename(filename)
+
+ size = annotation_root.find('size')
+ width = float(size.findtext('width'))
+ height = float(size.findtext('height'))
+
+ image_info = {
+ 'file_name': filename,
+ 'height': height,
+ 'width': width,
+ 'id': im_id
+ }
+ return image_info
+
+
+def voc_get_coco_annotation(obj, label2id):
+ label = obj.findtext('name')
+ assert label in label2id, "label is not in label2id."
+ category_id = label2id[label]
+ bndbox = obj.find('bndbox')
+ xmin = float(bndbox.findtext('xmin'))
+ ymin = float(bndbox.findtext('ymin'))
+ xmax = float(bndbox.findtext('xmax'))
+ ymax = float(bndbox.findtext('ymax'))
+ assert xmax > xmin and ymax > ymin, "Box size error."
+ o_width = xmax - xmin
+ o_height = ymax - ymin
+ anno = {
+ 'area': o_width * o_height,
+ 'iscrowd': 0,
+ 'bbox': [xmin, ymin, o_width, o_height],
+ 'category_id': category_id,
+ 'ignore': 0,
+ }
+ return anno
+
+
+def voc_xmls_to_cocojson(annotation_paths, label2id, output_dir, output_file):
+ output_json_dict = {
+ "images": [],
+ "type": "instances",
+ "annotations": [],
+ "categories": []
+ }
+ bnd_id = 1 # bounding box start id
+ im_id = 0
+ print('Start converting!')
+ for a_path in tqdm(annotation_paths):
+ # Read annotation xml
+ ann_tree = ET.parse(a_path)
+ ann_root = ann_tree.getroot()
+
+ img_info = voc_get_image_info(ann_root, im_id)
+ output_json_dict['images'].append(img_info)
+ print(a_path)
+ for obj in ann_root.findall('object'):
+ ann = voc_get_coco_annotation(obj=obj, label2id=label2id)
+ ann.update({'image_id': im_id, 'id': bnd_id})
+ output_json_dict['annotations'].append(ann)
+ bnd_id = bnd_id + 1
+ im_id += 1
+
+ for label, label_id in label2id.items():
+ category_info = {'supercategory': 'none', 'id': label_id, 'name': label}
+ output_json_dict['categories'].append(category_info)
+ output_file = os.path.join(output_dir, output_file)
+ with open(output_file, 'w') as f:
+ output_json = json.dumps(output_json_dict)
+ f.write(output_json)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument(
+ '--dataset_type',
+ help='the type of dataset, can be `voc`, `labelme` or `cityscape`')
+ parser.add_argument('--json_input_dir', help='input annotated directory')
+ parser.add_argument('--image_input_dir', help='image directory')
+ parser.add_argument(
+ '--output_dir', help='output dataset directory', default='./')
+ parser.add_argument(
+ '--train_proportion',
+ help='the proportion of train dataset',
+ type=float,
+ default=1.0)
+ parser.add_argument(
+ '--val_proportion',
+ help='the proportion of validation dataset',
+ type=float,
+ default=0.0)
+ parser.add_argument(
+ '--test_proportion',
+ help='the proportion of test dataset',
+ type=float,
+ default=0.0)
+ parser.add_argument(
+ '--voc_anno_dir',
+ help='For a VOC-format dataset, path to the annotation files directory.',
+ type=str,
+ default=None)
+ parser.add_argument(
+ '--voc_anno_list',
+ help='For a VOC-format dataset, path to the annotation file id list.',
+ type=str,
+ default=None)
+ parser.add_argument(
+ '--voc_label_list',
+ help='For a VOC-format dataset, path to the label list. Each line holds one category.',
+ type=str,
+ default=None)
+ parser.add_argument(
+ '--voc_out_name',
+ type=str,
+ default='voc.json',
+ help='For a VOC-format dataset, path to the output json file')
+ args = parser.parse_args()
+ try:
+ assert args.dataset_type in ['voc', 'labelme', 'cityscape']
+ except AssertionError:
+ print(
+ 'Only the voc, cityscape and labelme datasets are supported!')
+ os._exit(0)
+
+ if args.dataset_type == 'voc':
+ assert args.voc_anno_dir and args.voc_anno_list and args.voc_label_list
+ label2id, ann_paths = voc_get_label_anno(
+ args.voc_anno_dir, args.voc_anno_list, args.voc_label_list)
+ voc_xmls_to_cocojson(
+ annotation_paths=ann_paths,
+ label2id=label2id,
+ output_dir=args.output_dir,
+ output_file=args.voc_out_name)
+ else:
+ try:
+ assert os.path.exists(args.json_input_dir)
+ except AssertionError:
+ print('The json folder does not exist!')
+ os._exit(0)
+ try:
+ assert os.path.exists(args.image_input_dir)
+ except AssertionError:
+ print('The image folder does not exist!')
+ os._exit(0)
+ try:
+ assert abs(args.train_proportion + args.val_proportion \
+ + args.test_proportion - 1.0) < 1e-5
+ except AssertionError:
+ print(
+ 'The sum of proportions of training, validation and test datasets must be 1!'
+ )
+ os._exit(0)
+
+ # Allocate the dataset.
+ total_num = len(glob.glob(osp.join(args.json_input_dir, '*.json')))
+ if args.train_proportion != 0:
+ train_num = int(total_num * args.train_proportion)
+ out_dir = args.output_dir + '/train'
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ else:
+ train_num = 0
+ if args.val_proportion == 0.0:
+ val_num = 0
+ test_num = total_num - train_num
+ out_dir = args.output_dir + '/test'
+ if args.test_proportion != 0.0 and not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ else:
+ val_num = int(total_num * args.val_proportion)
+ test_num = total_num - train_num - val_num
+ val_out_dir = args.output_dir + '/val'
+ if not os.path.exists(val_out_dir):
+ os.makedirs(val_out_dir)
+ test_out_dir = args.output_dir + '/test'
+ if args.test_proportion != 0.0 and not os.path.exists(test_out_dir):
+ os.makedirs(test_out_dir)
+ count = 1
+ for img_name in os.listdir(args.image_input_dir):
+ if count <= train_num:
+ if osp.exists(args.output_dir + '/train/'):
+ shutil.copyfile(
+ osp.join(args.image_input_dir, img_name),
+ osp.join(args.output_dir + '/train/', img_name))
+ else:
+ if count <= train_num + val_num:
+ if osp.exists(args.output_dir + '/val/'):
+ shutil.copyfile(
+ osp.join(args.image_input_dir, img_name),
+ osp.join(args.output_dir + '/val/', img_name))
+ else:
+ if osp.exists(args.output_dir + '/test/'):
+ shutil.copyfile(
+ osp.join(args.image_input_dir, img_name),
+ osp.join(args.output_dir + '/test/', img_name))
+ count = count + 1
+
+ # Deal with the json files.
+ if not os.path.exists(args.output_dir + '/annotations'):
+ os.makedirs(args.output_dir + '/annotations')
+ if args.train_proportion != 0:
+ train_data_coco = deal_json(args.dataset_type,
+ args.output_dir + '/train',
+ args.json_input_dir)
+ train_json_path = osp.join(args.output_dir + '/annotations',
+ 'instance_train.json')
+ json.dump(
+ train_data_coco,
+ open(train_json_path, 'w'),
+ indent=4,
+ cls=MyEncoder)
+ if args.val_proportion != 0:
+ val_data_coco = deal_json(args.dataset_type,
+ args.output_dir + '/val',
+ args.json_input_dir)
+ val_json_path = osp.join(args.output_dir + '/annotations',
+ 'instance_val.json')
+ json.dump(
+ val_data_coco,
+ open(val_json_path, 'w'),
+ indent=4,
+ cls=MyEncoder)
+ if args.test_proportion != 0:
+ test_data_coco = deal_json(args.dataset_type,
+ args.output_dir + '/test',
+ args.json_input_dir)
+ test_json_path = osp.join(args.output_dir + '/annotations',
+ 'instance_test.json')
+ json.dump(
+ test_data_coco,
+ open(test_json_path, 'w'),
+ indent=4,
+ cls=MyEncoder)
+
+
+if __name__ == '__main__':
+ main()
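+
+# Example invocation for a VOC-format dataset (paths are illustrative):
+#
+#   python x2coco.py --dataset_type voc \
+#       --voc_anno_dir dataset/Annotations \
+#       --voc_anno_list dataset/train.txt \
+#       --voc_label_list dataset/label_list.txt \
+#       --voc_out_name voc_train.json \
+#       --output_dir dataset/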
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/index_infer_result.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/index_infer_result.png
new file mode 100644
index 000000000..a419074b7
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/index_infer_result.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/infer_result.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/infer_result.png
new file mode 100644
index 000000000..bf299fd11
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/infer_result.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/label_img.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/label_img.png
new file mode 100644
index 000000000..46aef3f11
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/label_img.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/result_5.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/result_5.png
new file mode 100644
index 000000000..511156661
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/result_5.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/xml_content.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/xml_content.png
new file mode 100644
index 000000000..f4db8d49c
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/docs/images/xml_content.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_1.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_1.png
new file mode 100644
index 000000000..ebce35597
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_1.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_2.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_2.png
new file mode 100644
index 000000000..466689918
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_2.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_3.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_3.png
new file mode 100644
index 000000000..6ada31633
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_3.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_4.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_4.png
new file mode 100644
index 000000000..ad7e495c6
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_4.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_5.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_5.png
new file mode 100644
index 000000000..8585dd300
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/pic_5.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_1.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_1.png
new file mode 100644
index 000000000..fb1b850f4
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_1.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_2.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_2.png
new file mode 100644
index 000000000..87c36dd61
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_2.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_3.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_3.png
new file mode 100644
index 000000000..050aebd3a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_3.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_4.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_4.png
new file mode 100644
index 000000000..a0a34bf6a
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_4.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_5.png b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_5.png
new file mode 100644
index 000000000..511156661
Binary files /dev/null and b/Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/result/result_5.png differ
diff --git a/Paddle_Industry_Practice_Sample_Library/Fall_Identify/divide_dataset.py b/Paddle_Industry_Practice_Sample_Library/Fall_Identify/divide_dataset.py
old mode 100644
new mode 100755
index 2c4fa269b..072cd25ce
--- a/Paddle_Industry_Practice_Sample_Library/Fall_Identify/divide_dataset.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fall_Identify/divide_dataset.py
@@ -2,19 +2,19 @@
import os
# Generate train.txt and val.txt
random.seed(2020)
-xml_dir = 'Annotations'#标签文件地址
-img_dir = 'images'#图像文件地址
+xml_dir = 'Annotations' # annotation files directory
+img_dir = 'images' # image files directory
path_list = list()
for img in os.listdir(img_dir):
- img_path = os.path.join(img_dir,img)
- xml_path = os.path.join(xml_dir,img.replace('jpg', 'xml'))
+ img_path = os.path.join(img_dir, img)
+ xml_path = os.path.join(xml_dir, img.replace('jpg', 'xml'))
path_list.append((img_path, xml_path))
random.shuffle(path_list)
ratio = 0.9
-train_f = open('train.txt','w') #生成训练文件
-val_f = open('val.txt' ,'w')#生成验证文件
+train_f = open('train.txt', 'w') # create the train list file
+val_f = open('val.txt', 'w') # create the val list file
-for i ,content in enumerate(path_list):
+for i, content in enumerate(path_list):
img, xml = content
text = img + ' ' + xml + '\n'
if i < len(path_list) * ratio:
@@ -25,7 +25,7 @@
val_f.close()
# Generate the label list file
-label = ['fall']#设置你想检测的类别
+label = ['fall'] # set the classes you want to detect
with open('label_list.txt', 'w') as f:
for text in label:
- f.write(text+'\n')
\ No newline at end of file
+ f.write(text + '\n')
diff --git a/Paddle_Industry_Practice_Sample_Library/Fall_Identify/images/demo.jpeg b/Paddle_Industry_Practice_Sample_Library/Fall_Identify/images/demo.jpeg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fall_Identify/images/modify.png b/Paddle_Industry_Practice_Sample_Library/Fall_Identify/images/modify.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fall_Identify/images/modify_2.png b/Paddle_Industry_Practice_Sample_Library/Fall_Identify/images/modify_2.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Figure_Skating/README.md b/Paddle_Industry_Practice_Sample_Library/Figure_Skating/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Figure_Skating/code/Instructions.md b/Paddle_Industry_Practice_Sample_Library/Figure_Skating/code/Instructions.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Figure_Skating/code/skeleton.py b/Paddle_Industry_Practice_Sample_Library/Figure_Skating/code/skeleton.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Figure_Skating/code/skeleton_pipeline.py b/Paddle_Industry_Practice_Sample_Library/Figure_Skating/code/skeleton_pipeline.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Figure_Skating/code/stgcn.py b/Paddle_Industry_Practice_Sample_Library/Figure_Skating/code/stgcn.py
old mode 100644
new mode 100755
index ae9ad7048..2524d685f
--- a/Paddle_Industry_Practice_Sample_Library/Figure_Skating/code/stgcn.py
+++ b/Paddle_Industry_Practice_Sample_Library/Figure_Skating/code/stgcn.py
@@ -100,11 +100,11 @@ def get_edge(self, layout):
elif layout == 'ntu-rgb+d':
self.num_node = 25
self_link = [(i, i) for i in range(self.num_node)]
- neighbor_1base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21),
- (6, 5), (7, 6), (8, 7), (9, 21), (10, 9),
- (11, 10), (12, 11), (13, 1), (14, 13), (15, 14),
- (16, 15), (17, 1), (18, 17), (19, 18), (20, 19),
- (22, 23), (23, 8), (24, 25), (25, 12)]
+ neighbor_1base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5),
+ (7, 6), (8, 7), (9, 21), (10, 9), (11, 10),
+ (12, 11), (13, 1), (14, 13), (15, 14), (16, 15),
+ (17, 1), (18, 17), (19, 18), (20, 19), (22, 23),
+ (23, 8), (24, 25), (25, 12)]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 21 - 1
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/README.md b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/README.md
old mode 100644
new mode 100755
index 66c4cfcb3..62c233b28
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/README.md
@@ -92,7 +92,7 @@ xml文件中包含以下字段:
```
├── dataset
- ├── annotations
+ ├── annotations
│ ├── fire_000001.xml
│ ├── fire_000002.xml
│ ├── fire_000003.xml
@@ -317,7 +317,7 @@ python infer.py
**Note:** As the conclusions in Table 1 show, some optimization strategies improve accuracy while others have the opposite effect. These conclusions do not carry over across different models and datasets, and need to be verified case by case.
- 本实验未提供"背景图"数据集(包含5116张图片),大家自行选择不包含的烟雾和火灾的数据作为负样本即可。
+ The "background image" dataset (5,116 images) is not provided in this experiment; please pick images containing no smoke or fire as negative samples yourselves.
**Model optimization ideas**:
@@ -347,6 +347,3 @@ python infer.py
## Open-source data
* Many thanks to [gengyanlei](https://github.com/gengyanlei/fire-smoke-detect-yolov4) and [Thomas-yanxin](https://aistudio.baidu.com/aistudio/datasetdetail/90352/0) for open-sourcing the fire and smoke datasets
-
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/accuracy_improvement.md b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/accuracy_improvement.md
old mode 100644
new mode 100755
index d5523f8cc..0d61ea9b9
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/accuracy_improvement.md
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/accuracy_improvement.md
@@ -49,4 +49,3 @@
| PP-YOLOv2+ResNet50(Baseline) | 95.1 | 23.22 |
| PP-YOLOv2+ResNet50+aug+COCO预训练+SPP+背景图 | 93.9 | 1.1 |
| PP-YOLOv2+ResNet101+aug+COCO预训练+SPP+背景图 | **96** | **2.2** |
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/1.train_ppyolov2_imagenet.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/1.train_ppyolov2_imagenet.py
old mode 100644
new mode 100755
index c81bd6e54..2d5129f52
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/1.train_ppyolov2_imagenet.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/1.train_ppyolov2_imagenet.py
@@ -6,17 +6,16 @@
import paddlex as pdx
from paddlex import transforms as T
-
# Define the transforms for training and validation
train_transforms = T.Compose([
- T.BatchRandomResize(
- target_sizes=[
- 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
- 736, 768
- ],
- interp='RANDOM'), T.Normalize(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
- ])
+ T.BatchRandomResize(
+ target_sizes=[
+ 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
+ 736, 768
+ ],
+ interp='RANDOM'), T.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+])
eval_transforms = T.Compose([
T.Resize(
@@ -52,4 +51,4 @@
warmup_start_lr=0.0,
lr_decay_epochs=[105, 135, 150],
save_interval_epochs=5,
- save_dir='output/ppyolov2_r50vd_dcn')
\ No newline at end of file
+ save_dir='output/ppyolov2_r50vd_dcn')
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/2.train_ppyolov2_imagenet_aug.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/2.train_ppyolov2_imagenet_aug.py
old mode 100644
new mode 100755
index 923c5abe3..11bc97438
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/2.train_ppyolov2_imagenet_aug.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/2.train_ppyolov2_imagenet_aug.py
@@ -6,18 +6,17 @@
import paddlex as pdx
from paddlex import transforms as T
-
# Define the transforms for training and validation
train_transforms = T.Compose([
- T.MixupImage(mixup_epoch=-1), T.RandomDistort(),
- T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]), T.RandomCrop(),
- T.RandomHorizontalFlip(), T.BatchRandomResize(
- target_sizes=[
- 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
- 736, 768
- ],
- interp='RANDOM'), T.Normalize(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ T.MixupImage(mixup_epoch=-1), T.RandomDistort(),
+ T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]), T.RandomCrop(),
+ T.RandomHorizontalFlip(), T.BatchRandomResize(
+ target_sizes=[
+ 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
+ 736, 768
+ ],
+ interp='RANDOM'), T.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
eval_transforms = T.Compose([
@@ -54,4 +53,4 @@
warmup_start_lr=0.0,
lr_decay_epochs=[105, 135, 150],
save_interval_epochs=5,
- save_dir='output/ppyolov2_r50vd_dcn_aug')
\ No newline at end of file
+ save_dir='output/ppyolov2_r50vd_dcn_aug')
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/3.train_ppyolov2_coco.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/3.train_ppyolov2_coco.py
old mode 100644
new mode 100755
index 8cd04481f..4d4fc66a3
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/3.train_ppyolov2_coco.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/3.train_ppyolov2_coco.py
@@ -6,17 +6,16 @@
import paddlex as pdx
from paddlex import transforms as T
-
# Define the transforms for training and validation
train_transforms = T.Compose([
- T.BatchRandomResize(
- target_sizes=[
- 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
- 736, 768
- ],
- interp='RANDOM'), T.Normalize(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
- ])
+ T.BatchRandomResize(
+ target_sizes=[
+ 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
+ 736, 768
+ ],
+ interp='RANDOM'), T.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+])
eval_transforms = T.Compose([
T.Resize(
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/4.train_ppyolov2_coco_aug.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/4.train_ppyolov2_coco_aug.py
old mode 100644
new mode 100755
index b98603709..4a65a5e3f
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/4.train_ppyolov2_coco_aug.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/4.train_ppyolov2_coco_aug.py
@@ -6,7 +6,6 @@
import paddlex as pdx
from paddlex import transforms as T
-
# Define the transforms for training and validation
train_transforms = T.Compose([
T.MixupImage(mixup_epoch=-1), T.RandomDistort(),
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/5.train_ppyolov2_coco_aug_noSPP.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/5.train_ppyolov2_coco_aug_noSPP.py
old mode 100644
new mode 100755
index 4a0c1e3ab..f67819f15
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/5.train_ppyolov2_coco_aug_noSPP.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/5.train_ppyolov2_coco_aug_noSPP.py
@@ -6,7 +6,6 @@
import paddlex as pdx
from paddlex import transforms as T
-
# Define the transforms for training and validation
train_transforms = T.Compose([
T.MixupImage(mixup_epoch=-1), T.RandomDistort(),
@@ -43,7 +42,8 @@
# Initialize the model and start training
# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3
num_classes = len(train_dataset.labels)
-model = pdx.det.PPYOLOv2(num_classes=num_classes, use_spp=False, backbone='ResNet50_vd_dcn')
+model = pdx.det.PPYOLOv2(
+ num_classes=num_classes, use_spp=False, backbone='ResNet50_vd_dcn')
model.train(
num_epochs=270,
train_dataset=train_dataset,
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/6.train_ppyolov2_r50_aug_COCO_addneg.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/6.train_ppyolov2_r50_aug_COCO_addneg.py
old mode 100644
new mode 100755
index e9a9de926..9d6d206a9
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/6.train_ppyolov2_r50_aug_COCO_addneg.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/6.train_ppyolov2_r50_aug_COCO_addneg.py
@@ -6,18 +6,17 @@
import paddlex as pdx
from paddlex import transforms as T
-
# Define the transforms for training and validation
train_transforms = T.Compose([
- T.MixupImage(mixup_epoch=-1), T.RandomDistort(),
- T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]), T.RandomCrop(),
- T.RandomHorizontalFlip(), T.BatchRandomResize(
- target_sizes=[
- 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
- 736, 768
- ],
- interp='RANDOM'), T.Normalize(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ T.MixupImage(mixup_epoch=-1), T.RandomDistort(),
+ T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]), T.RandomCrop(),
+ T.RandomHorizontalFlip(), T.BatchRandomResize(
+ target_sizes=[
+ 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
+ 736, 768
+ ],
+ interp='RANDOM'), T.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
eval_transforms = T.Compose([
@@ -44,8 +43,8 @@
shuffle=False)
# Add the background images to the training set
-train_dataset.add_negative_samples(image_dir='/home/aistudio/dataset/train_neg')
-
+train_dataset.add_negative_samples(
+ image_dir='/home/aistudio/dataset/train_neg')
# Initialize the model and start training
# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3
@@ -62,4 +61,4 @@
warmup_start_lr=0.0,
lr_decay_epochs=[105, 135, 150],
save_interval_epochs=5,
- save_dir='output/ppyolov2_r50vd_dcn_coco_aug_addneg')
\ No newline at end of file
+ save_dir='output/ppyolov2_r50vd_dcn_coco_aug_addneg')
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/7.train_ppyolov2_r101_aug_COCO_addneg.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/7.train_ppyolov2_r101_aug_COCO_addneg.py
old mode 100644
new mode 100755
index 384fddec9..c08ca344d
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/7.train_ppyolov2_r101_aug_COCO_addneg.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/7.train_ppyolov2_r101_aug_COCO_addneg.py
@@ -6,18 +6,17 @@
import paddlex as pdx
from paddlex import transforms as T
-
# Define the transforms for training and validation
train_transforms = T.Compose([
- T.MixupImage(mixup_epoch=-1), T.RandomDistort(),
- T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]), T.RandomCrop(),
- T.RandomHorizontalFlip(), T.BatchRandomResize(
- target_sizes=[
- 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
- 736, 768
- ],
- interp='RANDOM'), T.Normalize(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ T.MixupImage(mixup_epoch=-1), T.RandomDistort(),
+ T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]), T.RandomCrop(),
+ T.RandomHorizontalFlip(), T.BatchRandomResize(
+ target_sizes=[
+ 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
+ 736, 768
+ ],
+ interp='RANDOM'), T.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
eval_transforms = T.Compose([
@@ -43,7 +42,8 @@
shuffle=False)
# Initialize the model and start training
-train_dataset.add_negative_samples(image_dir='.//home/aistudio/dataset/train_neg')
+train_dataset.add_negative_samples(
+ image_dir='.//home/aistudio/dataset/train_neg')
# API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3
num_classes = len(train_dataset.labels)
@@ -57,6 +57,6 @@
learning_rate=0.000125,
warmup_steps=1000,
warmup_start_lr=0.0,
- lr_decay_epochs=[210,240],
+ lr_decay_epochs=[210, 240],
save_interval_epochs=5,
- save_dir='output/ppyolov2_r101vd_dcn_aug_coco_addneg')
\ No newline at end of file
+ save_dir='output/ppyolov2_r101vd_dcn_aug_coco_addneg')
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/8.train_ppyolo.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/8.train_ppyolo.py
old mode 100644
new mode 100755
index c75dd9baa..9ec2c41d8
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/8.train_ppyolo.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/8.train_ppyolo.py
@@ -6,7 +6,6 @@
import paddlex as pdx
from paddlex import transforms as T
-
# Define the transforms for training and validation
train_transforms = T.Compose([
T.MixupImage(mixup_epoch=-1), T.RandomDistort(),
@@ -58,4 +57,3 @@
lr_decay_epochs=[85, 135],
save_interval_epochs=5,
save_dir='output/ppyolo_r50vd_dcn_coco_aug')
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/9.train_yolov3_coco.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/9.train_yolov3_coco.py
old mode 100644
new mode 100755
index 5e2187ab8..25ff3207c
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/9.train_yolov3_coco.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/9.train_yolov3_coco.py
@@ -6,16 +6,15 @@
import paddlex as pdx
from paddlex import transforms as T
-
# Define the transforms for training and validation
train_transforms = T.Compose([
- T.BatchRandomResize(
- target_sizes=[
- 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
- 736, 768
- ],
- interp='RANDOM'), T.Normalize(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ T.BatchRandomResize(
+ target_sizes=[
+ 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
+ 736, 768
+ ],
+ interp='RANDOM'), T.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
eval_transforms = T.Compose([
@@ -56,4 +55,3 @@
lr_decay_epochs=[216, 243],
save_interval_epochs=5,
save_dir='output/yolov3_darknet53_coco')
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/infer.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/infer.py
old mode 100644
new mode 100755
index b34a25c04..b9e6a6eab
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/infer.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/infer.py
@@ -3,8 +3,7 @@
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import paddlex as pdx
-predictor = pdx.deploy.Predictor(model_dir='frcnn_dcn_inference_model/inference_model',
- use_gpu=True)
-result = predictor.predict(img_file='test_imgs/fire_1.jpg',
- warmup_iters=100,
- repeats=200)
+predictor = pdx.deploy.Predictor(
+ model_dir='frcnn_dcn_inference_model/inference_model', use_gpu=True)
+result = predictor.predict(
+ img_file='test_imgs/fire_1.jpg', warmup_iters=100, repeats=200)
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/metric.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/metric.py
old mode 100644
new mode 100755
index 453e12b02..7faadb86a
--- a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/metric.py
@@ -1,17 +1,18 @@
# coding:utf-8
import os
-os.environ['CUDA_VISIBLE_DEVICES']='0'
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import numpy as np
import shutil
import paddlex as pdx
+
def cal_image_level(model, dataset_dir):
file_list = os.path.join(dataset_dir, 'test_list_2.txt')
threshold = 0.4
# threshold = 0.5
- matrix = [[0,0],[0,0]]
+ matrix = [[0, 0], [0, 0]]
fire_to_no = []
no_to_fire = []
@@ -52,64 +53,71 @@ def cal_image_level(model, dataset_dir):
keep_results = [keep_results[k]
for k in sorted_idxs] if keep_results else []
- if len(keep_results)>0:
+ if len(keep_results) > 0:
predict_label = 1
else:
predict_label = 0
if label == 1:
if label == predict_label:
- matrix[0][0]+=1
+ matrix[0][0] += 1
else:
- matrix[1][0]+=1
+ matrix[1][0] += 1
fire_to_no.append(img_file)
name = os.path.basename(img_file)
- shutil.copyfile(img_file, os.path.join(fire_to_no_path, name))
+ shutil.copyfile(img_file,
+ os.path.join(fire_to_no_path, name))
else:
if label == predict_label:
- matrix[1][1]+=1
+ matrix[1][1] += 1
else:
- matrix[0][1]+=1
- no_to_fire.append(img_file)
+ matrix[0][1] += 1
+ no_to_fire.append(img_file)
# Draw the results
- pdx.det.visualize(img_file, keep_results, threshold=threshold, save_dir=no_to_fire_path)
-
- recall = matrix[0][0] / (matrix[0][0]+matrix[1][0])
- error = matrix[0][1] / (matrix[0][1]+matrix[1][1])
- print('===matrix:',matrix)
- print('===recall:',recall)
- print('===error:',error)
- print('===烟火图被判定为无烟火的图片包含:',len(fire_to_no))
- print('===无烟火图被判定为烟火的图片包含:',len(no_to_fire))
+ pdx.det.visualize(
+ img_file,
+ keep_results,
+ threshold=threshold,
+ save_dir=no_to_fire_path)
+
+ recall = matrix[0][0] / (matrix[0][0] + matrix[1][0])
+ error = matrix[0][1] / (matrix[0][1] + matrix[1][1])
+ print('===matrix:', matrix)
+ print('===recall:', recall)
+ print('===error:', error)
+ print('===fire/smoke images misclassified as negative:', len(fire_to_no))
+ print('===non-fire images misclassified as fire/smoke:', len(no_to_fire))
return recall, error
+
def select_best(dataset_dir, model_dirs):
max_recall = 0
min_error = 100
- best_recall = [0,0]
- best_error = [0,0]
+ best_recall = [0, 0]
+ best_error = [0, 0]
for model_dir in sorted(os.listdir(model_dirs)):
if 'epoch' in model_dir or 'best_model' in model_dir:
model_dir = os.path.join(model_dirs, model_dir)
model = pdx.load_model(model_dir)
- recall, error = cal_image_level(model, dataset_dir)
- if recall>max_recall:
- best_recall = [model_dir, recall, error]+best_recall
+ recall, error = cal_image_level(model, dataset_dir)
+ if recall > max_recall:
+ best_recall = [model_dir, recall, error] + best_recall
max_recall = recall
if error < min_error:
- best_error = [model_dir, recall, error]+best_error
+ best_error = [model_dir, recall, error] + best_error
min_error = error
else:
continue
- print('==best recall:',best_recall[:-2])
- print('====best error:',best_error[:-2])
- print('====final best:',best_recall[0],best_error[0])
+ print('==best recall:', best_recall[:-2])
+ print('====best error:', best_error[:-2])
+ print('====final best:', best_recall[0], best_error[0])
+
if __name__ == '__main__':
dataset_dir = 'eval_imgs'
model_dirs = 'output/ppyolov2_r50vd_dcn/'
select_best(dataset_dir, model_dirs)
-
+
# # model_dir = 'output/ppyolov2_r50vd_dcn/best_model/'
# model = pdx.load_model(model_dir)
- # recall, error = cal_image_level(model, dataset_dir)
\ No newline at end of file
+ # recall, error = cal_image_level(model, dataset_dir)
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/predict.py b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/predict.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/test_imgs/test.jpg b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/code/test_imgs/test.jpg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/dataset.png b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/dataset.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/demo.png b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/demo.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/deploy.png b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/deploy.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/eval_1.png b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/eval_1.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/eval_2.png b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/eval_2.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/hard_1.png b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/hard_1.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/predict.jpg b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/predict.jpg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/predict_result.png b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/predict_result.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/train.png b/Paddle_Industry_Practice_Sample_Library/Fire_and_Smoke_Detection/docs/images/train.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.github/ISSUE_TEMPLATE/bug_report.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.github/ISSUE_TEMPLATE/bug_report.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.github/ISSUE_TEMPLATE/custom.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.github/ISSUE_TEMPLATE/custom.md
old mode 100644
new mode 100755
index 48d5f81fa..b894315f4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.github/ISSUE_TEMPLATE/custom.md
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.github/ISSUE_TEMPLATE/custom.md
@@ -6,5 +6,3 @@ labels: ''
assignees: ''
---
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.github/ISSUE_TEMPLATE/feature_request.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.github/ISSUE_TEMPLATE/feature_request.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.gitignore b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.gitignore
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.log.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.log.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.pre-commit-config.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.pre-commit-config.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.style.yapf b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/.style.yapf
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/LICENSE b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/LICENSE
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/MANIFEST.in b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/MANIFEST.in
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/README.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/README.md
old mode 100644
new mode 100755
index 370f5420d..5a2776ea4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/README.md
@@ -14,7 +14,7 @@
👀 **报名链接**: https://paddleqiyeban.wjx.cn/vj/QIValIZ.aspx?udsid=419689
**课程回放链接**: https://aistudio.baidu.com/aistudio/course/introduce/6742
- 💖 **欢迎大家扫码入群讨论** 💖
+ 💖 **Everyone is welcome to scan the QR code and join the group discussion** 💖

diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/README_en.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/README_en.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/README.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/README.md
old mode 100644
new mode 100755
index c4a31f16a..600c86d73
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/README.md
@@ -215,7 +215,7 @@ cd datasets/script && python get_instance_for_bmn.py
```
    |-- datasets    # training dataset and processing scripts
        |-- EuroCup2016            # xx dataset
- |-- input_for_bmn # bmn训练的proposal
+        |-- input_for_bmn    # proposals for BMN training
```
#### step2.2 BMN模型训练
@@ -334,7 +334,7 @@ cd datasets/script && python get_instance_for_lstm.py
#### step3.2 LSTM训练
```
-sh run.sh # LSTM 模块
+sh run.sh  # LSTM module
```
#### step3.3 LSTM模型转为预测模式
@@ -377,4 +377,4 @@ cd predict && python eval.py results.json
- [TSM: Temporal Shift Module for Efficient Video Understanding](https://arxiv.org/pdf/1811.08383.pdf), Ji Lin, Chuang Gan, Song Han
- [BMN: Boundary-Matching Network for Temporal Action Proposal Generation](https://arxiv.org/abs/1907.09702), Tianwei Lin, Xiao Liu, Xin Li, Errui Ding, Shilei Wen.
- [Attention Clusters: Purely Attention Based Local Feature Integration for Video Classification](https://arxiv.org/abs/1711.09550), Xiang Long, Chuang Gan, Gerard de Melo, Jiajun Wu, Xiao Liu, Shilei Wen
-- [YouTube-8M: A Large-Scale Video Classification Benchmark](https://arxiv.org/abs/1609.08675), Sami Abu-El-Haija, Nisarg Kothari, Joonseok Lee, Paul Natsev, George Toderici, Balakrishnan Varadarajan, Sudheendra Vijayanarasimhan
\ No newline at end of file
+- [YouTube-8M: A Large-Scale Video Classification Benchmark](https://arxiv.org/abs/1609.08675), Sami Abu-El-Haija, Nisarg Kothari, Joonseok Lee, Paul Natsev, George Toderici, Balakrishnan Varadarajan, Sudheendra Vijayanarasimhan
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/download_dataset.sh b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/download_dataset.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/label.json b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/label.json
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/label_cls8_train.json b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/label_cls8_train.json
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/label_cls8_val.json b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/label_cls8_val.json
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/url.list b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/url.list
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/url_val.list b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/EuroCup2016/url_val.list
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_frames_pcm.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_frames_pcm.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_bmn.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_bmn.py
old mode 100644
new mode 100755
index c79673274..8dd0a533e
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_bmn.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_bmn.py
@@ -14,10 +14,7 @@
dataset = "datasets/EuroCup2016"
feat_dir = dataset + '/features'
out_dir = dataset + '/input_for_bmn'
-label_files = {
- 'train': 'label.json',
- 'validation': 'label.json'
-}
+label_files = {'train': 'label.json', 'validation': 'label.json'}
global fps
@@ -55,12 +52,9 @@ def gen_gts_for_bmn(gts_data):
if duration > bmn_window:
after_id = cur_action['start_id']
gts_bmn['gts'][-1]['root_actions'].append({
- 'before_id':
- before_id,
- 'after_id':
- after_id,
- 'actions':
- root_actions
+ 'before_id': before_id,
+ 'after_id': after_id,
+ 'actions': root_actions
})
before_id = root_actions[-1]['end_id']
root_actions = [cur_action]
@@ -69,12 +63,9 @@ def gen_gts_for_bmn(gts_data):
if idx == len(sub_actions) - 1:
after_id = max_length
gts_bmn['gts'][-1]['root_actions'].append({
- 'before_id':
- before_id,
- 'after_id':
- after_id,
- 'actions':
- root_actions
+ 'before_id': before_id,
+ 'after_id': after_id,
+ 'actions': root_actions
})
return gts_bmn
@@ -105,18 +96,14 @@ def combile_gts(gts_bmn, gts_process, mode):
# first action
segments.append({
'actions': [root_action['actions'][0]],
- 'before_id':
- root_action['before_id'],
- 'after_id':
- root_action['actions'][1]['start_id']
+ 'before_id': root_action['before_id'],
+ 'after_id': root_action['actions'][1]['start_id']
})
# last action
segments.append({
'actions': [root_action['actions'][-1]],
- 'before_id':
- root_action['actions'][-2]['end_id'],
- 'after_id':
- root_action['after_id']
+ 'before_id': root_action['actions'][-2]['end_id'],
+ 'after_id': root_action['after_id']
})
for segment in segments:
before_id = segment['before_id']
@@ -204,7 +191,7 @@ def save_feature_to_numpy(gts_bmn, folder):
gts_data = json.load(open(label_file, 'rb'))
gts_process = gen_gts_for_bmn(gts_data)
gts_bmn = combile_gts(gts_bmn, gts_process, item)
-
+
gts_bmn = save_feature_to_numpy(gts_bmn, out_dir + '/feature')
with open(out_dir + '/label.json', 'w', encoding='utf-8') as f:
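get_instance_for_bmn.py groups consecutive ground-truth actions into `root_actions` windows and records `before_id`/`after_id` context boundaries whenever a window is closed. A hedged sketch of that windowing with made-up frame ids; the closing rule (span exceeding `bmn_window`) is simplified relative to the script:

```python
def group_actions(actions, total_frames, bmn_window=200):
    """Split time-ordered actions into windows of at most bmn_window frames."""
    windows, current, before_id = [], [], 0
    for idx, act in enumerate(actions):
        if current and act['end_id'] - current[0]['start_id'] > bmn_window:
            windows.append({'before_id': before_id,
                            'after_id': act['start_id'],
                            'actions': current})
            before_id = current[-1]['end_id']
            current = [act]
        else:
            current.append(act)
        if idx == len(actions) - 1 and current:
            windows.append({'before_id': before_id,
                            'after_id': total_frames,
                            'actions': current})
    return windows

acts = [{'start_id': 10, 'end_id': 40}, {'start_id': 50, 'end_id': 90},
        {'start_id': 400, 'end_id': 430}]
for w in group_actions(acts, total_frames=1000):
    print(w['before_id'], w['after_id'], len(w['actions']))
```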
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_lstm.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_lstm.py
old mode 100644
new mode 100755
index 844109bd4..62b77bf46
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_lstm.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_lstm.py
@@ -28,8 +28,8 @@ def IoU(e1, e2):
x1 = np.maximum(e1["start"], e2["start"])
x2 = np.minimum(e1["end"], e2["end"])
inter = np.maximum(0.0, x2 - x1)
- iou = 0.0 if (area1 + area2 -
- inter) == 0 else inter * 1.0 / (area1 + area2 - inter)
+ iou = 0.0 if (area1 + area2 - inter) == 0 else inter * 1.0 / (
+ area1 + area2 - inter)
ioa = 0.0 if area2 == 0 else inter * 1.0 / area2
return iou, ioa
@@ -125,13 +125,15 @@ def save_feature(label_info, out_dir):
end_id = proposal['proposal']['end']
# get hit feature
image_feature_hit = image_feature[start_id * fps:end_id * fps]
- audio_feature_hit = audio_feature[min(start_id, max_len_audio
- ):min(end_id, max_len_audio)]
+ audio_feature_hit = audio_feature[min(start_id, max_len_audio):min(
+ end_id, max_len_audio)]
# save
anno_info = {
- 'image_feature': np.array(image_feature_hit, dtype=np.float32),
- 'audio_feature': np.array(audio_feature_hit, dtype=np.float32),
+ 'image_feature': np.array(
+ image_feature_hit, dtype=np.float32),
+ 'audio_feature': np.array(
+ audio_feature_hit, dtype=np.float32),
'feature_fps': fps,
'label_info': proposal,
'video_name': basename
@@ -153,8 +155,8 @@ def save_feature(label_info, out_dir):
prop_data = json.load(open(prop_file, 'rb'))
proposal_data = {}
for item in prop_data:
- proposal_data[os.path.basename(
- item['video_name'])] = item['bmn_results']
+ proposal_data[os.path.basename(item['video_name'])] = item[
+ 'bmn_results']
# get label info
res_bmn = {'fps': 0, 'results': []}
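The IoU helper reflowed in the hunk above computes both intersection-over-union and intersection-over-area for 1-D temporal segments, guarding the zero-length denominators. The same arithmetic in isolation:

```python
import numpy as np

def temporal_iou(e1, e2):
    """IoU and IoA for segments given as {'start': ..., 'end': ...}."""
    area1 = e1["end"] - e1["start"]
    area2 = e2["end"] - e2["start"]
    x1 = np.maximum(e1["start"], e2["start"])
    x2 = np.minimum(e1["end"], e2["end"])
    inter = np.maximum(0.0, x2 - x1)
    union = area1 + area2 - inter
    iou = 0.0 if union == 0 else inter * 1.0 / union
    ioa = 0.0 if area2 == 0 else inter * 1.0 / area2  # fraction of e2 covered
    return iou, ioa

print(temporal_iou({'start': 0, 'end': 10}, {'start': 5, 'end': 15}))
# -> (0.333..., 0.5)
```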
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_lstm_long_proposal.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_lstm_long_proposal.py
old mode 100644
new mode 100755
index 2237645ea..eb4e83bfb
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_lstm_long_proposal.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_lstm_long_proposal.py
@@ -13,10 +13,8 @@
feat_dir = dataset + '/features'
prop_file = dataset + '/feature_bmn/prop.json'
out_dir = dataset + '/input_for_lstm_long_proposal'
-label_files = {
- 'train': 'label.json',
- 'validation': 'label.json'
-}
+label_files = {'train': 'label.json', 'validation': 'label.json'}
+
def IoU(e1, e2):
"""
@@ -27,10 +25,12 @@ def IoU(e1, e2):
x1 = np.maximum(e1["start"], e2["start"])
x2 = np.minimum(e1["end"], e2["end"])
inter = np.maximum(0.0, x2 - x1)
- iou = 0.0 if (area1 + area2 - inter) == 0 else inter * 1.0 / (area1 + area2 - inter)
+ iou = 0.0 if (area1 + area2 - inter) == 0 else inter * 1.0 / (
+ area1 + area2 - inter)
ioa = 0.0 if area2 == 0 else inter * 1.0 / area2
return iou, ioa
+
def clc_iou_of_proposal(proposal, gts):
hit_gts = {}
label = 0
@@ -46,18 +46,22 @@ def clc_iou_of_proposal(proposal, gts):
hit_gts = gt
# label = hit_gts['label_ids'][0]
label = max(hit_gts['label_ids'])
- norm_start = (gt['start_id'] - proposal['start']) * 1.0 / (proposal['end'] - proposal['start'])
+ norm_start = (gt['start_id'] - proposal['start']) * 1.0 / (
+ proposal['end'] - proposal['start'])
hit_iou_threshold = iou
hit_ioa_threshold = ioa
break
- res = {'label': label,
- 'norm_iou': hit_iou_threshold,
- 'norm_ioa': hit_ioa_threshold,
- 'norm_start': norm_start,
- 'proposal': proposal,
- 'hit_gts': hit_gts}
- return res
-
+ res = {
+ 'label': label,
+ 'norm_iou': hit_iou_threshold,
+ 'norm_ioa': hit_ioa_threshold,
+ 'norm_start': norm_start,
+ 'proposal': proposal,
+ 'hit_gts': hit_gts
+ }
+ return res
+
+
def get_bmn_info(gts_data, proposal_data, res_bmn, mode, score_threshold=0.01):
"""
@param, gts_data, original gts for action detection
@@ -71,7 +75,7 @@ def get_bmn_info(gts_data, proposal_data, res_bmn, mode, score_threshold=0.01):
url = gts_item['url']
print(url)
max_length = gts_item['total_frames']
-
+
video_name = os.path.basename(url).split('.')[0]
if not video_name in proposal_data:
continue
@@ -83,12 +87,14 @@ def get_bmn_info(gts_data, proposal_data, res_bmn, mode, score_threshold=0.01):
prop_actions = proposal_data[video_name]
- res_bmn['results'].append({'url': url,
- 'mode': mode,
- 'total_frames': max_length,
- 'num_gts': len(gts_actions),
- 'num_proposals': len(prop_actions),
- 'proposal_actions': []})
+ res_bmn['results'].append({
+ 'url': url,
+ 'mode': mode,
+ 'total_frames': max_length,
+ 'num_gts': len(gts_actions),
+ 'num_proposals': len(prop_actions),
+ 'proposal_actions': []
+ })
for proposal in prop_actions:
if proposal['score'] < score_threshold:
continue
@@ -98,9 +104,10 @@ def get_bmn_info(gts_data, proposal_data, res_bmn, mode, score_threshold=0.01):
# proposal['end'] = int(float(proposal['end']) / fps)
gts_info = clc_iou_of_proposal(proposal, gts_actions)
res_bmn['results'][-1]['proposal_actions'].append(gts_info)
-
+
return res_bmn
+
def save_feature(label_info, out_dir):
print('save feature ...')
fps = label_info['fps']
@@ -114,7 +121,7 @@ def save_feature(label_info, out_dir):
print(basename, res['num_proposals'], res['num_gts'])
mode = res['mode']
fid = fid_train if mode == 'train' else fid_val
- feature_path = os.path.join(feat_dir, basename + '.pkl')
+ feature_path = os.path.join(feat_dir, basename + '.pkl')
feature_data = pickle.load(open(feature_path, 'rb'))
image_feature = feature_data['image_feature']
audio_feature = feature_data['audio_feature']
@@ -129,38 +136,47 @@ def save_feature(label_info, out_dir):
label = proposal['label']
norm_iou = proposal['norm_iou']
norm_ioa = proposal['norm_ioa']
- start_id = max(proposal['proposal']['start'] - fps, 0) # 扩大特征序列
+            start_id = max(proposal['proposal']['start'] - fps, 0)  # expand the feature sequence
end_id = min(proposal['proposal']['end'] + fps, max_len_img)
# get hit feature
- audio_feature_hit = audio_feature[min(int(start_id / fps), max_len_audio):
- min(int(end_id / fps), max_len_audio)]
- image_feature_hit = image_feature[start_id: end_id]
+ audio_feature_hit = audio_feature[min(
+ int(start_id / fps), max_len_audio):min(
+ int(end_id / fps), max_len_audio)]
+ image_feature_hit = image_feature[start_id:end_id]
#pcm_feature_hit = pcm_feature[start_id: end_id]
# image_feature_hit = image_feature[start_id * fps: end_id * fps]
# pcm_feature_hit = pcm_feature[start_id: end_id]
# save
- anno_info = {'image_feature': np.array(image_feature_hit, dtype=np.float32),
- 'audio_feature': np.array(audio_feature_hit, dtype=np.float32),
- 'feature_fps': fps,
- 'label_info': proposal,
- 'video_name': basename}
- save_name = '{}/{}_{}_{}.pkl'.format(out_feature_dir, basename, start_id, end_id)
- with open(save_name,'wb') as f:
+ anno_info = {
+ 'image_feature': np.array(
+ image_feature_hit, dtype=np.float32),
+ 'audio_feature': np.array(
+ audio_feature_hit, dtype=np.float32),
+ 'feature_fps': fps,
+ 'label_info': proposal,
+ 'video_name': basename
+ }
+ save_name = '{}/{}_{}_{}.pkl'.format(out_feature_dir, basename,
+ start_id, end_id)
+ with open(save_name, 'wb') as f:
pickle.dump(anno_info, f, protocol=pickle.HIGHEST_PROTOCOL)
- fid.write('{} {} {} {}\n'.format(save_name, label, norm_iou, norm_ioa))
+ fid.write('{} {} {} {}\n'.format(save_name, label, norm_iou,
+ norm_ioa))
fid_train.close()
fid_val.close()
print('done!')
+
if __name__ == "__main__":
if not os.path.exists(out_dir):
os.mkdir(out_dir)
prop_data = json.load(open(prop_file, 'rb'))
proposal_data = {}
for item in prop_data:
- proposal_data[os.path.basename(item['video_name'])] = item['bmn_results']
+ proposal_data[os.path.basename(item['video_name'])] = item[
+ 'bmn_results']
# get label info
res_bmn = {'fps': 0, 'results': []}
@@ -170,8 +186,8 @@ def save_feature(label_info, out_dir):
res_bmn = get_bmn_info(gts_data, proposal_data, res_bmn, item)
with open(out_dir + '/label_info.json', 'w', encoding='utf-8') as f:
- data = json.dumps(res_bmn, indent=4, ensure_ascii=False)
- f.write(data)
+ data = json.dumps(res_bmn, indent=4, ensure_ascii=False)
+ f.write(data)
# save feature
- save_feature(res_bmn, out_dir)
+ save_feature(res_bmn, out_dir)
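`clc_iou_of_proposal`, reflowed above, assigns each BMN proposal the label of the first ground truth it overlaps strongly enough, plus a start offset normalized by the proposal length. A hedged sketch of that matching step; the thresholds and the exact tie-breaking are assumptions based on the hunk:

```python
def overlap(a_start, a_end, b_start, b_end):
    """IoU / IoA of two 1-D segments."""
    inter = max(0.0, min(a_end, b_end) - max(a_start, b_start))
    union = (a_end - a_start) + (b_end - b_start) - inter
    iou = inter / union if union > 0 else 0.0
    ioa = inter / (b_end - b_start) if b_end > b_start else 0.0
    return iou, ioa

def label_proposal(proposal, gts, iou_thr=0.5, ioa_thr=0.5):
    """Label a proposal with the first sufficiently-overlapping ground truth."""
    res = {'label': 0, 'norm_iou': 0.0, 'norm_ioa': 0.0,
           'norm_start': 0.0, 'proposal': proposal, 'hit_gts': {}}
    length = proposal['end'] - proposal['start']
    for gt in gts:
        iou, ioa = overlap(proposal['start'], proposal['end'],
                           gt['start_id'], gt['end_id'])
        if iou >= iou_thr or ioa >= ioa_thr:
            res.update(label=max(gt['label_ids']),
                       norm_iou=iou, norm_ioa=ioa,
                       norm_start=(gt['start_id'] - proposal['start']) / length,
                       hit_gts=gt)
            break
    return res

gts = [{'start_id': 12, 'end_id': 30, 'label_ids': [3]}]
print(label_proposal({'start': 10, 'end': 32}, gts)['label'])  # -> 3
```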
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_pptsm.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_pptsm.py
old mode 100644
new mode 100755
index af57ee3e0..7772c8f69
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_pptsm.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/datasets/script/get_instance_for_pptsm.py
@@ -60,8 +60,8 @@ def process(item, fps, save_folder):
data = f.read()
frames.append(data)
# print(label_str)
- outname = '%s/%s_%08d_%08d_%s.pkl' % (save_folder, basename, start, end,
- label_str)
+ outname = '%s/%s_%08d_%08d_%s.pkl' % (save_folder, basename, start,
+ end, label_str)
with open(outname, 'wb') as f:
pickle.dump((basename, label, frames), f, -1)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/configs/configs.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/configs/configs.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/configs/index_label_football_8.json b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/configs/index_label_football_8.json
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/extract_bmn.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/extract_bmn.py
old mode 100644
new mode 100755
index 8fef1da81..cfb63340e
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/extract_bmn.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/extract_bmn.py
@@ -22,6 +22,7 @@
import logger
logger = logger.Logger()
+
def load_model(cfg_file="configs/configs.yaml"):
"""
load_model
@@ -48,7 +49,7 @@ def video_classify(video_name):
pcm_path = video_name.replace(".mp4", ".pcm").replace("mp4", "pcm")
# step 1: extract feature
-
+
feature_path = video_name.replace(".mp4", ".pkl").replace("mp4", "features")
video_features = pickle.load(open(feature_path, 'rb'))
@@ -77,10 +78,13 @@ def video_classify(video_name):
for line in lines:
bmn_results = video_classify(line)
- results.append({'video_name': os.path.basename(line).split('.')[0],
- 'num_proposal': len(bmn_results),
- 'bmn_results': bmn_results})
-
- with open(dataset_dir + '/feature_bmn/prop.json', 'w', encoding='utf-8') as f:
- data = json.dumps(results, indent=4, ensure_ascii=False)
- f.write(data)
+ results.append({
+ 'video_name': os.path.basename(line).split('.')[0],
+ 'num_proposal': len(bmn_results),
+ 'bmn_results': bmn_results
+ })
+
+ with open(
+ dataset_dir + '/feature_bmn/prop.json', 'w', encoding='utf-8') as f:
+ data = json.dumps(results, indent=4, ensure_ascii=False)
+ f.write(data)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/extract_feat.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/extract_feat.py
old mode 100644
new mode 100755
index c30b64909..e399b3027
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/extract_feat.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/extractor/extract_feat.py
@@ -64,17 +64,20 @@ def video_classify(video_name):
np_pcm_features = np.array(pcm_features, dtype=np.float32)
t1 = time.time()
- logger.info('{} {} {}'.format(np_image_features.shape,
- np_audio_features.shape,
- np_pcm_features.shape))
- logger.info("step1: feature extract time: {} min".format((t1 - t0) * 1.0 / 60))
- video_features = {'image_feature': np_image_features,
- 'audio_feature': np_audio_features,
- 'pcm_feature': np_pcm_features}
-
+ logger.info('{} {} {}'.format(np_image_features.shape, np_audio_features.
+ shape, np_pcm_features.shape))
+ logger.info("step1: feature extract time: {} min".format((t1 - t0) * 1.0 /
+ 60))
+ video_features = {
+ 'image_feature': np_image_features,
+ 'audio_feature': np_audio_features,
+ 'pcm_feature': np_pcm_features
+ }
+
# save feature
feature_path = video_name.replace(".mp4", ".pkl").replace("mp4", "features")
- feat_pkl_str = pickle.dumps(video_features, protocol=pickle.HIGHEST_PROTOCOL)
+ feat_pkl_str = pickle.dumps(
+ video_features, protocol=pickle.HIGHEST_PROTOCOL)
with open(feature_path, 'wb') as fout:
fout.write(feat_pkl_str)
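extract_feat.py stores the three per-video feature arrays in a single pickle next to the video. A round trip of that storage format; the shapes below are placeholders, not the real feature dimensions:

```python
import pickle
import numpy as np

video_features = {
    'image_feature': np.zeros((500, 2048), dtype=np.float32),  # placeholder
    'audio_feature': np.zeros((100, 128), dtype=np.float32),   # placeholder
    'pcm_feature': np.zeros((100, 640), dtype=np.float32),     # placeholder
}

feat_pkl_str = pickle.dumps(video_features, protocol=pickle.HIGHEST_PROTOCOL)
with open('demo_features.pkl', 'wb') as fout:
    fout.write(feat_pkl_str)

with open('demo_features.pkl', 'rb') as fin:
    restored = pickle.load(fin)
print({k: v.shape for k, v in restored.items()})
```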
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/action.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/action.py
old mode 100644
new mode 100755
index 6f4775f38..ef0c28adf
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/action.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/action.py
@@ -25,9 +25,11 @@
import logger
logger = logger.Logger()
+
def record_time_info(func):
"""decorator func to log cost time for func
"""
+
@functools.wraps(func)
def timer(*args):
"""log cost time for func
@@ -36,33 +38,37 @@ def timer(*args):
start_time = time.time()
retval = func(*args)
cost_time = round(time.time() - start_time, 5)
- logger.info("function [{}] run time: {:.2f} min".format(func.__name__, cost_time / 60))
+ logger.info("function [{}] run time: {:.2f} min".format(
+ func.__name__, cost_time / 60))
return retval
+
return timer
class ActionDetection(object):
"""ModelPredict"""
+
def __init__(self, cfg_file="configs/configs.yaml"):
cfg = parse_config(cfg_file)
self.configs = cfg
print_configs(self.configs, "Infer")
name = 'COMMON'
- self.DEBUG = cfg[name]['DEBUG']
- self.BMN_ONLY = cfg[name]['BMN_ONLY']
- self.LSTM_ONLY = cfg[name]['LSTM_ONLY']
- self.PCM_ONLY = cfg[name]['PCM_ONLY']
+ self.DEBUG = cfg[name]['DEBUG']
+ self.BMN_ONLY = cfg[name]['BMN_ONLY']
+ self.LSTM_ONLY = cfg[name]['LSTM_ONLY']
+ self.PCM_ONLY = cfg[name]['PCM_ONLY']
if self.LSTM_ONLY:
self.prop_dict = {}
for dataset in ['EuroCup2016']:
- prop_json = '/home/work/datasets/{}/feature_bmn/prop.json'.format(dataset)
+ prop_json = '/home/work/datasets/{}/feature_bmn/prop.json'.format(
+ dataset)
json_data = json.load(open(prop_json, 'r'))
for item in json_data:
- basename = prop_json.replace('feature_bmn/prop.json', 'mp4')
+ basename = prop_json.replace('feature_bmn/prop.json',
+ 'mp4')
basename = basename + '/' + item['video_name'] + '.mp4'
self.prop_dict[basename] = item['bmn_results']
-
@record_time_info
def load_model(self):
@@ -73,7 +79,7 @@ def load_model(self):
self.image_model = image_model.InferModel(self.configs)
if not self.PCM_ONLY:
self.audio_model = audio_model.InferModel(self.configs)
-
+
if not self.LSTM_ONLY:
self.prop_model = prop_model.InferModel(self.configs)
@@ -91,18 +97,19 @@ def infer(self, imgs_path, pcm_path, fps=5):
self.pcm_path = pcm_path
self.configs['COMMON']['fps'] = fps
- logger.info("==> input video {}".format(os.path.basename(self.imgs_path)))
-
+ logger.info("==> input video {}".format(
+ os.path.basename(self.imgs_path)))
+
# step 1: extract feature
video_features = self.extract_feature()
-
+
# step2: get proposal
bmn_results = self.extract_proposal(video_features)
-
+
# step3: classify
material = {'feature': video_features, 'proposal': bmn_results}
action_results = self.video_classify(material)
-
+
return bmn_results, action_results
@record_time_info
@@ -110,7 +117,8 @@ def video_classify(self, material):
"""video classify"""
if self.BMN_ONLY:
return []
- action_results = self.classify_model.predict(self.configs, material=material)
+ action_results = self.classify_model.predict(
+ self.configs, material=material)
logger.info('action shape {}'.format(np.array(action_results).shape))
return action_results
@@ -121,7 +129,8 @@ def extract_proposal(self, video_features):
basename = self.imgs_path.replace('frames', 'mp4') + '.mp4'
bmn_results = self.prop_dict[basename]
return bmn_results
- bmn_results = self.prop_model.predict(self.configs, material=video_features)
+ bmn_results = self.prop_model.predict(
+ self.configs, material=video_features)
logger.info('proposal shape {}'.format(np.array(bmn_results).shape))
return bmn_results
@@ -135,28 +144,34 @@ def extract_feature(self):
image_features = self.image_model.predict(self.configs)
if self.PCM_ONLY:
sample_rate = self.configs['AUDIO']['sample_rate']
- pcm_features = mfcc_extractor.extract_pcm(self.pcm_path, sample_rate)
+ pcm_features = mfcc_extractor.extract_pcm(self.pcm_path,
+ sample_rate)
audio_features = []
else:
- audio_features, pcm_features = self.audio_model.predict(self.configs)
+ audio_features, pcm_features = self.audio_model.predict(
+ self.configs)
np_image_features = np.array(image_features, dtype=np.float32)
np_audio_features = np.array(audio_features, dtype=np.float32)
np_pcm_features = np.array(pcm_features, dtype=np.float32)
- video_features = {'image_feature': np_image_features,
- 'audio_feature': np_audio_features,
- 'pcm_feature': np_pcm_features}
+ video_features = {
+ 'image_feature': np_image_features,
+ 'audio_feature': np_audio_features,
+ 'pcm_feature': np_pcm_features
+ }
else:
- feature_path = self.imgs_path.replace("frames", "features") + '.pkl'
+ feature_path = self.imgs_path.replace("frames",
+ "features") + '.pkl'
video_features = pickle.load(open(feature_path, 'rb'))
- logger.info("feature shape {} {} {}".format(video_features['image_feature'].shape,
- video_features['audio_feature'].shape,
- video_features['pcm_feature'].shape))
+ logger.info("feature shape {} {} {}".format(video_features[
+ 'image_feature'].shape, video_features[
+ 'audio_feature'].shape, video_features['pcm_feature'].shape))
return video_features
+
if __name__ == '__main__':
model_predict = ActionDetection(cfg_file="../configs/configs.yaml")
@@ -169,6 +184,5 @@ def extract_feature(self):
results = {'bmn_results': bmn_results, 'action_results': action_results}
with open('results.json', 'w', encoding='utf-8') as f:
- data = json.dumps(results, indent=4, ensure_ascii=False)
- f.write(data)
-
+ data = json.dumps(results, indent=4, ensure_ascii=False)
+ f.write(data)
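`record_time_info` in action.py is a plain timing decorator; `functools.wraps` preserves the wrapped function's `__name__` so the log line stays meaningful. The pattern in isolation (printing instead of the project logger):

```python
import functools
import time

def record_time_info(func):
    """Log the wall-clock cost of each call to func."""
    @functools.wraps(func)
    def timer(*args, **kwargs):
        start_time = time.time()
        retval = func(*args, **kwargs)
        cost_time = round(time.time() - start_time, 5)
        print("function [{}] run time: {:.2f} min".format(
            func.__name__, cost_time / 60))
        return retval
    return timer

@record_time_info
def extract_feature():
    time.sleep(0.1)

extract_feature()  # -> function [extract_feature] run time: 0.00 min
```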
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/logger.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/logger.py
old mode 100644
new mode 100755
index b03348721..57b67518d
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/logger.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/logger.py
@@ -4,9 +4,11 @@
import os
import logging
+
class Logger(logging.Logger):
"""Customized logger for news stripper
"""
+
def __init__(self):
super(Logger, self).__init__(self)
if not os.path.exists('logs'):
@@ -21,4 +23,3 @@ def __init__(self):
formatter = logging.Formatter(format, datefmt)
handler.setFormatter(formatter)
self.addHandler(handler)
-
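logger.py subclasses logging.Logger directly and attaches a file handler pointing into logs/. A cleaned-up sketch of the same idea; the logger name, file name, and format string here are assumptions:

```python
import logging
import os

class Logger(logging.Logger):
    """File logger writing into logs/, mirroring the class above."""
    def __init__(self, name='action_detect', log_dir='logs'):
        super().__init__(name)
        os.makedirs(log_dir, exist_ok=True)
        self.setLevel(logging.INFO)
        handler = logging.FileHandler(os.path.join(log_dir, name + '.log'))
        handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s %(message)s', '%Y-%m-%d %H:%M:%S'))
        self.addHandler(handler)

logger = Logger()
logger.info('==> input video demo.mp4')  # appended to logs/action_detect.log
```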
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/mfcc/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/mfcc/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/mfcc/model_config.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/mfcc/model_config.py
old mode 100644
new mode 100755
index 194365ece..58def2472
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/mfcc/model_config.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/mfcc/model_config.py
@@ -10,6 +10,7 @@ class ModelAudio(object):
"""
modelAudio
"""
+
def __init__(self, configs, use_gpu=1):
self.use_gpu = use_gpu
@@ -21,8 +22,8 @@ def predict_slice(self, wav_data, sample_rate):
"""
audio predict
"""
- examples_batch = feature_extractor.wav_to_example(
- wav_data, sample_rate)[0]
+ examples_batch = feature_extractor.wav_to_example(wav_data,
+ sample_rate)[0]
return examples_batch
def predict_audio(self, audio_file):
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/audio_infer.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/audio_infer.py
old mode 100644
new mode 100755
index 7b19c90ed..ab8a8b449
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/audio_infer.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/audio_infer.py
@@ -16,13 +16,14 @@
class InferModel(object):
"""audio infer"""
- def __init__(self, cfg, name='AUDIO'):
+
+ def __init__(self, cfg, name='AUDIO'):
name = name.upper()
- self.name = name
- model_file = cfg[name]['model_file']
- params_file = cfg[name]['params_file']
- gpu_mem = cfg[name]['gpu_mem']
- device_id = cfg[name]['device_id']
+ self.name = name
+ model_file = cfg[name]['model_file']
+ params_file = cfg[name]['params_file']
+ gpu_mem = cfg[name]['gpu_mem']
+ device_id = cfg[name]['device_id']
# model init
config = Config(model_file, params_file)
@@ -39,7 +40,6 @@ def __init__(self, cfg, name='AUDIO'):
output_names = self.predictor.get_output_names()
self.output_tensor = self.predictor.get_output_handle(output_names[0])
-
def infer(self, input):
"""infer"""
self.input_tensor.copy_from_cpu(input)
@@ -47,14 +47,13 @@ def infer(self, input):
output = self.output_tensor.copy_to_cpu()
return output
-
def predict(self, infer_config):
"""predict"""
infer_reader = reader.get_reader(self.name, 'infer', infer_config)
feature_list = []
pcm_list = []
for infer_iter, data in enumerate(infer_reader()):
- inputs = np.array(data, dtype = 'float32')
+ inputs = np.array(data, dtype='float32')
output = self.infer(inputs)
feature_list.append(np.squeeze(output))
pcm_list.append(inputs)
@@ -64,7 +63,7 @@ def predict(self, infer_config):
if __name__ == "__main__":
- cfg_file = '/home/work/inference/configs/configs.yaml'
+ cfg_file = '/home/work/inference/configs/configs.yaml'
cfg = parse_config(cfg_file)
model = InferModel(cfg)
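The audio, BMN, LSTM, and PP-TSM infer classes all share the same paddle.inference skeleton: build a Config from the exported model and params files, create a predictor, and move numpy arrays through input/output handles. A condensed sketch, assuming Paddle 2.x and placeholder model paths:

```python
import numpy as np
from paddle.inference import Config, create_predictor

# Placeholder paths; in the repo these come from configs.yaml.
config = Config('model.pdmodel', 'model.pdiparams')
config.enable_use_gpu(2000, 0)  # gpu_mem in MB, device_id

predictor = create_predictor(config)
input_tensor = predictor.get_input_handle(predictor.get_input_names()[0])
output_tensor = predictor.get_output_handle(predictor.get_output_names()[0])

def infer(batch):
    """Run one forward pass on a numpy batch."""
    input_tensor.copy_from_cpu(np.asarray(batch, dtype='float32'))
    predictor.run()
    return output_tensor.copy_to_cpu()
```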
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/bmn_infer.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/bmn_infer.py
old mode 100644
new mode 100755
index 963f75669..0f1632558
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/bmn_infer.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/bmn_infer.py
@@ -19,17 +19,18 @@
class InferModel(object):
"""bmn infer"""
- def __init__(self, cfg, name='BMN'):
+
+ def __init__(self, cfg, name='BMN'):
name = name.upper()
- self.name = name
- model_file = cfg[name]['model_file']
- params_file = cfg[name]['params_file']
- gpu_mem = cfg[name]['gpu_mem']
- device_id = cfg[name]['device_id']
+ self.name = name
+ model_file = cfg[name]['model_file']
+ params_file = cfg[name]['params_file']
+ gpu_mem = cfg[name]['gpu_mem']
+ device_id = cfg[name]['device_id']
- self.nms_thread = cfg[name]['nms_thread']
- self.min_pred_score = cfg[name]['score_thread']
- self.min_frame_thread = cfg['COMMON']['fps']
+ self.nms_thread = cfg[name]['nms_thread']
+ self.min_pred_score = cfg[name]['score_thread']
+ self.min_frame_thread = cfg['COMMON']['fps']
# model init
config = Config(model_file, params_file)
@@ -48,7 +49,6 @@ def __init__(self, cfg, name='BMN'):
self.output2_tensor = self.predictor.get_output_handle(output_names[1])
self.output3_tensor = self.predictor.get_output_handle(output_names[2])
-
def infer(self, input):
"""infer"""
self.input_tensor.copy_from_cpu(input)
@@ -58,10 +58,15 @@ def infer(self, input):
output3 = self.output3_tensor.copy_to_cpu()
return output1, output2, output3
-
- def generate_props(self, pred_bmn, pred_start, pred_end, max_window=200, min_window=5):
+ def generate_props(self,
+ pred_bmn,
+ pred_start,
+ pred_end,
+ max_window=200,
+ min_window=5):
"""generate_props"""
- video_len = min(pred_bmn.shape[-1], min(pred_start.shape[-1], pred_end.shape[-1]))
+ video_len = min(pred_bmn.shape[-1],
+ min(pred_start.shape[-1], pred_end.shape[-1]))
pred_bmn = pred_bmn[0, :, :] * pred_bmn[1, :, :]
start_mask = self.boundary_choose(pred_start)
start_mask[0] = 1.
@@ -72,7 +77,8 @@ def generate_props(self, pred_bmn, pred_start, pred_end, max_window=200, min_win
for jdx in range(video_len):
start_index = jdx
end_index = start_index + idx
- if end_index < video_len and start_mask[start_index] == 1 and end_mask[end_index] == 1:
+ if end_index < video_len and start_mask[
+ start_index] == 1 and end_mask[end_index] == 1:
xmin = start_index
xmax = end_index
xmin_score = pred_start[start_index]
@@ -82,7 +88,6 @@ def generate_props(self, pred_bmn, pred_start, pred_end, max_window=200, min_win
score_results.append([xmin, xmax, conf_score])
return score_results
-
def boundary_choose(self, score_list):
"""boundary_choose"""
max_score = max(score_list)
@@ -96,17 +101,17 @@ def boundary_choose(self, score_list):
mask = (mask_high | mask_peak).astype('float32')
return mask
-
def predict(self, infer_config, material):
"""predict"""
- infer_reader = reader.get_reader(self.name, 'infer', infer_config, material=material)
+ infer_reader = reader.get_reader(
+ self.name, 'infer', infer_config, material=material)
feature_list = []
for infer_iter, data in enumerate(infer_reader()):
- inputs = [items[0] for items in data]
- winds = [items[1] for items in data]
- feat_info = [items[2] for items in data]
- feature_T = feat_info[0][0]
- feature_N = feat_info[0][1]
+ inputs = [items[0] for items in data]
+ winds = [items[1] for items in data]
+ feat_info = [items[2] for items in data]
+ feature_T = feat_info[0][0]
+ feature_N = feat_info[0][1]
inputs = np.array(inputs)
pred_bmn, pred_sta, pred_end = self.infer(inputs)
@@ -118,23 +123,25 @@ def predict(self, infer_config, material):
sum_pred_cnt = np.zeros((feature_T, ))
for idx, sub_wind in enumerate(winds):
- sum_pred_bmn[:, :, sub_wind[0]: sub_wind[1]] += pred_bmn[idx]
- sum_pred_sta[sub_wind[0]: sub_wind[1]] += pred_sta[idx]
- sum_pred_end[sub_wind[0]: sub_wind[1]] += pred_end[idx]
- sum_pred_cnt[sub_wind[0]: sub_wind[1]] += np.ones((sub_wind[1] - sub_wind[0], ))
+ sum_pred_bmn[:, :, sub_wind[0]:sub_wind[1]] += pred_bmn[idx]
+ sum_pred_sta[sub_wind[0]:sub_wind[1]] += pred_sta[idx]
+ sum_pred_end[sub_wind[0]:sub_wind[1]] += pred_end[idx]
+ sum_pred_cnt[sub_wind[0]:sub_wind[1]] += np.ones(
+ (sub_wind[1] - sub_wind[0], ))
pred_bmn = sum_pred_bmn / sum_pred_cnt
pred_sta = sum_pred_sta / sum_pred_cnt
pred_end = sum_pred_end / sum_pred_cnt
score_result = self.generate_props(pred_bmn, pred_sta, pred_end)
- results = process_proposal(score_result, self.min_frame_thread, self.nms_thread, self.min_pred_score)
+ results = process_proposal(score_result, self.min_frame_thread,
+ self.nms_thread, self.min_pred_score)
return results
if __name__ == "__main__":
- cfg_file = '/home/work/inference/configs/configs.yaml'
+ cfg_file = '/home/work/inference/configs/configs.yaml'
cfg = parse_config(cfg_file)
model = InferModel(cfg)
@@ -151,6 +158,6 @@ def predict(self, infer_config, material):
results = {'proposal': outputs}
with open('results.json', 'w', encoding='utf-8') as f:
- data = json.dumps(results, indent=4, ensure_ascii=False)
- f.write(data)
+ data = json.dumps(results, indent=4, ensure_ascii=False)
+ f.write(data)
print('cost time = {} min'.format((t1 - t0) / 60.0))
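`boundary_choose`, visible in context above, marks a frame as a candidate boundary when its score is either close to the global maximum or a strict local peak; `generate_props` then pairs start/end candidates within a window. A hedged numpy sketch of the mask step (the 0.5 ratio is an assumption):

```python
import numpy as np

def boundary_choose(score_list, high_ratio=0.5):
    """Mask of candidate boundaries: high score OR strict local peak."""
    score = np.asarray(score_list, dtype='float32')
    mask_high = score > high_ratio * score.max()
    left = np.pad(score[:-1], (1, 0), constant_values=-np.inf)
    right = np.pad(score[1:], (0, 1), constant_values=-np.inf)
    mask_peak = (score > left) & (score > right)
    return (mask_high | mask_peak).astype('float32')

print(boundary_choose([0.1, 0.9, 0.2, 0.4, 0.3]))  # -> [0. 1. 0. 1. 0.]
```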
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/lstm_infer.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/lstm_infer.py
old mode 100644
new mode 100755
index acb387422..268ec7baf
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/lstm_infer.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/lstm_infer.py
@@ -19,23 +19,24 @@
class InferModel(object):
"""lstm infer"""
- def __init__(self, cfg, name='ACTION'):
+
+ def __init__(self, cfg, name='ACTION'):
name = name.upper()
- self.name = name
- model_file = cfg[name]['model_file']
- params_file = cfg[name]['params_file']
- gpu_mem = cfg[name]['gpu_mem']
- device_id = cfg[name]['device_id']
-
- self.topk = cfg[name]['topk']
- self.frame_offset = cfg[name]['nms_offset']
- self.nms_thread = cfg[name]['nms_thread']
- self.cls_thread = cfg[name]['classify_score_thread']
- self.iou_thread = cfg[name]['iou_score_thread']
+ self.name = name
+ model_file = cfg[name]['model_file']
+ params_file = cfg[name]['params_file']
+ gpu_mem = cfg[name]['gpu_mem']
+ device_id = cfg[name]['device_id']
+
+ self.topk = cfg[name]['topk']
+ self.frame_offset = cfg[name]['nms_offset']
+ self.nms_thread = cfg[name]['nms_thread']
+ self.cls_thread = cfg[name]['classify_score_thread']
+ self.iou_thread = cfg[name]['iou_score_thread']
self.label_map_file = cfg['COMMON']['label_dic']
- self.fps = cfg['COMMON']['fps']
- self.nms_id = 5
+ self.fps = cfg['COMMON']['fps']
+ self.nms_id = 5
# model init
config = Config(model_file, params_file)
@@ -54,7 +55,6 @@ def __init__(self, cfg, name='ACTION'):
self.output1_tensor = self.predictor.get_output_handle(output_names[0])
self.output2_tensor = self.predictor.get_output_handle(output_names[1])
-
def infer(self, input1_arr, input1_lod, input2_arr=None, input2_lod=None):
"""infer"""
self.input1_tensor.copy_from_cpu(input1_arr)
@@ -86,7 +86,8 @@ def pre_process(self, input):
def predict(self, infer_config, material):
"""predict"""
- infer_reader = reader.get_reader(self.name, 'infer', infer_config, material=material)
+ infer_reader = reader.get_reader(
+ self.name, 'infer', infer_config, material=material)
results = []
for infer_iter, data in enumerate(infer_reader()):
video_id = [[items[-2], items[-1]] for items in data]
@@ -94,33 +95,36 @@ def predict(self, infer_config, material):
input2 = [items[1] for items in data]
input1_arr, input1_lod = self.pre_process(input1)
input2_arr, input2_lod = self.pre_process(input2)
- output1, output2 = self.infer(input1_arr, input1_lod, input2_arr, input2_lod)
+ output1, output2 = self.infer(input1_arr, input1_lod, input2_arr,
+ input2_lod)
# output1, output2 = self.infer(input1_arr, input1_lod)
- predictions_id = output1
+ predictions_id = output1
predictions_iou = output2
for i in range(len(predictions_id)):
topk_inds = predictions_id[i].argsort()[0 - self.topk:]
topk_inds = topk_inds[::-1]
preds_id = predictions_id[i][topk_inds]
preds_iou = predictions_iou[i][0]
- results.append((video_id[i], preds_id.tolist(), topk_inds.tolist(), preds_iou.tolist()))
+ results.append((video_id[i], preds_id.tolist(),
+ topk_inds.tolist(), preds_iou.tolist()))
- predict_result = get_action_result(results, self.label_map_file, self.fps,
- self.cls_thread, self.iou_thread,
- self.nms_id, self.nms_thread, self.frame_offset)
+ predict_result = get_action_result(
+ results, self.label_map_file, self.fps, self.cls_thread,
+ self.iou_thread, self.nms_id, self.nms_thread, self.frame_offset)
return predict_result
if __name__ == "__main__":
- cfg_file = '/home/work/inference/configs/configs.yaml'
+ cfg_file = '/home/work/inference/configs/configs.yaml'
cfg = parse_config(cfg_file)
model = InferModel(cfg)
# proposal total
prop_dict = {}
for dataset in ['EuroCup2016', 'WorldCup2018']:
- prop_json = '/home/work/datasets/{}/feature_bmn/prop.json'.format(dataset)
+ prop_json = '/home/work/datasets/{}/feature_bmn/prop.json'.format(
+ dataset)
json_data = json.load(open(prop_json, 'r'))
for item in json_data:
basename = prop_json.replace('feature_bmn/prop.json', 'mp4')
@@ -146,7 +150,7 @@ def predict(self, infer_config, material):
t1 = time.time()
results = {'actions': outputs}
with open('results.json', 'w', encoding='utf-8') as f:
- data = json.dumps(results, indent=4, ensure_ascii=False)
- f.write(data)
+ data = json.dumps(results, indent=4, ensure_ascii=False)
+ f.write(data)
print('cost time = {} min'.format((t1 - t0) / 60.0))
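The LSTM predict loop keeps the top-k class scores per proposal via `argsort`. The idiom in isolation:

```python
import numpy as np

predictions = np.array([0.05, 0.70, 0.10, 0.15])  # per-class scores
topk = 2

topk_inds = predictions.argsort()[-topk:][::-1]  # best indices first
print(topk_inds, predictions[topk_inds])         # -> [1 3] [0.7  0.15]
```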
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/pptsm_infer.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/pptsm_infer.py
old mode 100644
new mode 100755
index 71fda566f..80e469014
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/pptsm_infer.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/models/pptsm_infer.py
@@ -16,13 +16,14 @@
class InferModel(object):
"""pptsm infer"""
- def __init__(self, cfg, name='PPTSM'):
+
+ def __init__(self, cfg, name='PPTSM'):
name = name.upper()
- self.name = name
- model_file = cfg[name]['model_file']
- params_file = cfg[name]['params_file']
- gpu_mem = cfg[name]['gpu_mem']
- device_id = cfg[name]['device_id']
+ self.name = name
+ model_file = cfg[name]['model_file']
+ params_file = cfg[name]['params_file']
+ gpu_mem = cfg[name]['gpu_mem']
+ device_id = cfg[name]['device_id']
# model init
config = Config(model_file, params_file)
@@ -40,7 +41,6 @@ def __init__(self, cfg, name='PPTSM'):
output_names = self.predictor.get_output_names()
self.output_tensor = self.predictor.get_output_handle(output_names[0])
-
def infer(self, input):
"""infer"""
self.input_tensor.copy_from_cpu(input)
@@ -48,7 +48,6 @@ def infer(self, input):
output = self.output_tensor.copy_to_cpu()
return output
-
def predict(self, infer_config):
"""predict"""
infer_reader = reader.get_reader(self.name, 'infer', infer_config)
@@ -63,11 +62,11 @@ def predict(self, infer_config):
if __name__ == "__main__":
- cfg_file = '/home/work/inference/configs/configs.yaml'
+ cfg_file = '/home/work/inference/configs/configs.yaml'
cfg = parse_config(cfg_file)
model = InferModel(cfg)
- imgs_path = '/home/work/datasets/WorldCup2018/frames/6e577252c4004961ac7caa738a52c238/'
+ imgs_path = '/home/work/datasets/WorldCup2018/frames/6e577252c4004961ac7caa738a52c238/'
imgs_list = get_images(imgs_path)
t0 = time.time()
cfg['PPTSM']['frame_list'] = imgs_list
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/audio_reader.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/audio_reader.py
old mode 100644
new mode 100755
index 2e1f1d28f..6b77a595f
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/audio_reader.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/audio_reader.py
@@ -32,6 +32,7 @@
from .reader_utils import DataReader
import mfcc.feature_extractor as feature_extractor
+
class AudioReader(DataReader):
"""
Data reader for youtube-8M dataset, which was stored as features extracted by prior networks
@@ -58,14 +59,16 @@ def create_reader(self):
with open(self.pcm_file, "rb") as f:
pcm_data = f.read()
audio_data = np.fromstring(pcm_data, dtype=np.int16)
- examples = feature_extractor.wav_to_example(audio_data, self.sample_rate)
+ examples = feature_extractor.wav_to_example(audio_data,
+ self.sample_rate)
+
# print(examples.shape)
def reader():
"""reader"""
batch_out = []
batch_out_pre = []
-
+
for audio in examples:
# batch_out.append([audio])
batch_out.append(audio)
@@ -74,5 +77,5 @@ def reader():
batch_out = []
if len(batch_out) > 0:
yield batch_out
-
+
return reader
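The reader closures in these modules all batch the same way: accumulate items, yield full batches, and flush the remainder at the end. Distilled into a standalone generator factory:

```python
def make_batch_reader(items, batch_size):
    """Yield lists of up to batch_size items, flushing the final remainder."""
    def reader():
        batch_out = []
        for item in items:
            batch_out.append(item)
            if len(batch_out) == batch_size:
                yield batch_out
                batch_out = []
        if len(batch_out) > 0:
            yield batch_out
    return reader

for batch in make_batch_reader(range(7), batch_size=3)():
    print(batch)  # -> [0, 1, 2] then [3, 4, 5] then [6]
```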
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/bmninf_reader.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/bmninf_reader.py
old mode 100644
new mode 100755
index a076f2bfe..e09630e59
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/bmninf_reader.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/bmninf_reader.py
@@ -113,14 +113,13 @@ def get_match_map(self):
self.anchor_xmin = [self.tgap * i for i in range(self.tscale)]
self.anchor_xmax = [self.tgap * i for i in range(1, self.tscale + 1)]
-
def load_file(self, video_wind):
"""
load_file
"""
start_feat_id = video_wind[0]
end_feat_id = video_wind[1]
- video_feat = self.features[video_wind[0]: video_wind[1]]
+ video_feat = self.features[video_wind[0]:video_wind[1]]
video_feat = video_feat.T
video_feat = video_feat.astype("float32")
return video_feat
@@ -135,6 +134,7 @@ def make_infer_reader(self):
"""
reader for inference
"""
+
def reader():
"""
reader
@@ -144,7 +144,8 @@ def reader():
for video_wind in self.video_list:
video_idx = self.video_list.index(video_wind)
video_feat = self.load_file(video_wind)
- batch_out.append((video_feat, video_wind, [self.duration, self.dscale]))
+ batch_out.append(
+ (video_feat, video_wind, [self.duration, self.dscale]))
if len(batch_out) == self.batch_size:
yield batch_out
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/feature_reader.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/feature_reader.py
old mode 100644
new mode 100755
index 4e406f739..63631bf55
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/feature_reader.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/feature_reader.py
@@ -27,6 +27,7 @@
from .reader_utils import DataReader
+
class FeatureReader(DataReader):
"""
Data reader for youtube-8M dataset, which was stored as features extracted by prior networks
@@ -54,7 +55,8 @@ def create_reader(self):
image_feature_list = self.feature['image_feature']
audio_feature_list = self.feature['audio_feature']
pcm_feature_list = self.feature['pcm_feature']
- pcm_feature_list = pcm_feature_list.reshape((pcm_feature_list.shape[0] * 5, 640))
+ pcm_feature_list = pcm_feature_list.reshape(
+ (pcm_feature_list.shape[0] * 5, 640))
fl = self.proposal
@@ -71,17 +73,19 @@ def reader():
end_id = int(prop_info['end'])
bmn_score = float(prop_info['score'])
try:
- image_feature = image_feature_list[start_id: end_id]
- audio_feature = audio_feature_list[int(start_id / self.fps): int(end_id / self.fps)]
- pcm_feature = pcm_feature_list[start_id: end_id]
+ image_feature = image_feature_list[start_id:end_id]
+ audio_feature = audio_feature_list[int(
+ start_id / self.fps):int(end_id / self.fps)]
+ pcm_feature = pcm_feature_list[start_id:end_id]
# image_feature = np.concatenate((image_feature, pcm_feature), axis=1)
-
- batch_out.append((image_feature, audio_feature, 0, prop_info))
+
+ batch_out.append(
+ (image_feature, audio_feature, 0, prop_info))
if len(batch_out) == self.batch_size:
yield batch_out
batch_out = []
except Exception as e:
continue
- return reader
+ return reader
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/reader_utils.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/reader_utils.py
old mode 100644
new mode 100755
index f76b5d38d..32253f338
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/reader_utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/reader_utils.py
@@ -66,6 +66,7 @@ class ReaderZoo(object):
"""
ReaderZoo
"""
+
def __init__(self):
"""
__init__
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/tsminf_reader.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/tsminf_reader.py
old mode 100644
new mode 100755
index 9886d5424..11f4c15df
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/tsminf_reader.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/reader/tsminf_reader.py
@@ -37,17 +37,19 @@ class TSMINFReader(DataReader):
def __init__(self, name, mode, cfg, material=None):
super(TSMINFReader, self).__init__(name, mode, cfg)
name = name.upper()
- self.seg_num = cfg[name]['seg_num']
- self.seglen = cfg[name]['seglen']
- self.short_size = cfg[name]['short_size']
- self.target_size = cfg[name]['target_size']
- self.batch_size = cfg[name]['batch_size']
+ self.seg_num = cfg[name]['seg_num']
+ self.seglen = cfg[name]['seglen']
+ self.short_size = cfg[name]['short_size']
+ self.target_size = cfg[name]['target_size']
+ self.batch_size = cfg[name]['batch_size']
self.reader_threads = cfg[name]['reader_threads']
- self.buf_size = cfg[name]['buf_size']
- self.video_path = cfg[name]['frame_list']
+ self.buf_size = cfg[name]['buf_size']
+ self.video_path = cfg[name]['frame_list']
- self.img_mean = np.array(cfg[name]['image_mean']).reshape([3, 1, 1]).astype(np.float32)
- self.img_std = np.array(cfg[name]['image_std']).reshape([3, 1, 1]).astype(np.float32)
+ self.img_mean = np.array(cfg[name]['image_mean']).reshape(
+ [3, 1, 1]).astype(np.float32)
+ self.img_std = np.array(cfg[name]['image_std']).reshape(
+ [3, 1, 1]).astype(np.float32)
self.material = material
@@ -56,16 +58,16 @@ def create_reader(self):
batch loader for TSN
"""
_reader = self._inference_reader_creator_longvideo(
- self.video_path,
- self.mode,
- seg_num=self.seg_num,
- seglen=self.seglen,
- short_size=self.short_size,
- target_size=self.target_size,
- img_mean=self.img_mean,
- img_std=self.img_std,
- num_threads = self.reader_threads,
- buf_size = self.buf_size)
+ self.video_path,
+ self.mode,
+ seg_num=self.seg_num,
+ seglen=self.seglen,
+ short_size=self.short_size,
+ target_size=self.target_size,
+ img_mean=self.img_mean,
+ img_std=self.img_std,
+ num_threads=self.reader_threads,
+ buf_size=self.buf_size)
def _batch_reader():
batch_out = []
@@ -81,20 +83,22 @@ def _batch_reader():
return _batch_reader
-
- def _inference_reader_creator_longvideo(self, video_path, mode, seg_num, seglen,
- short_size, target_size, img_mean, img_std, num_threads, buf_size):
+ def _inference_reader_creator_longvideo(
+ self, video_path, mode, seg_num, seglen, short_size, target_size,
+ img_mean, img_std, num_threads, buf_size):
"""
inference reader for video
"""
+
def reader():
"""
reader
"""
+
def image_buf(image_id_path_buf):
"""
image_buf reader
- """
+ """
try:
img_path = image_id_path_buf[1]
img = Image.open(img_path).convert("RGB")
@@ -105,39 +109,43 @@ def image_buf(image_id_path_buf):
frame_len = len(video_path)
read_thread_num = seg_num
for i in range(0, frame_len, read_thread_num):
- image_list_part = video_path[i: i + read_thread_num]
+ image_list_part = video_path[i:i + read_thread_num]
image_id_path_buf_list = []
for k in range(len(image_list_part)):
image_id_path_buf_list.append([k, image_list_part[k], None])
-
- with concurrent.futures.ThreadPoolExecutor(max_workers=read_thread_num) as executor:
- executor.map(lambda image_id_path_buf: image_buf(image_id_path_buf), image_id_path_buf_list)
+ with concurrent.futures.ThreadPoolExecutor(
+ max_workers=read_thread_num) as executor:
+ executor.map(
+ lambda image_id_path_buf: image_buf(image_id_path_buf),
+ image_id_path_buf_list)
imgs_seg_list = [x[2] for x in image_id_path_buf_list]
-
+
# add the fault-tolerant for bad image
for k in range(len(image_id_path_buf_list)):
img_buf = image_id_path_buf_list[k][2]
pad_id = 1
while pad_id < seg_num and img_buf is None:
- img_buf = imgs_seg_list[(k + pad_id)%seg_num][2]
+ img_buf = imgs_seg_list[(k + pad_id) % seg_num][2]
if img_buf is None:
- logger.info("read img erro from {} to {}".format(i, i + read_thread_num))
+ logger.info("read img error from {} to {}".format(
+ i, i + read_thread_num))
exit(0)
else:
imgs_seg_list[k] = img_buf
for pad_id in range(len(imgs_seg_list), seg_num):
imgs_seg_list.append(imgs_seg_list[-1])
- yield imgs_seg_list
+ yield imgs_seg_list
def inference_imgs_transform(imgs_list, mode, seg_num, seglen, short_size,\
target_size, img_mean, img_std):
"""
inference_imgs_transform
- """
- imgs_ret = imgs_transform(imgs_list, mode, seg_num, seglen, short_size,
- target_size, img_mean, img_std)
+ """
+ imgs_ret = imgs_transform(imgs_list, mode, seg_num, seglen,
+ short_size, target_size, img_mean,
+ img_std)
label_ret = 0
return imgs_ret, label_ret
@@ -152,7 +160,8 @@ def inference_imgs_transform(imgs_list, mode, seg_num, seglen, short_size,\
img_mean=img_mean,
img_std=img_std)
- return paddle.reader.xmap_readers(mapper, reader, num_threads, buf_size, order=True)
+ return paddle.reader.xmap_readers(
+ mapper, reader, num_threads, buf_size, order=True)
def imgs_transform(imgs,
@@ -260,10 +269,10 @@ def _sample_crop_size(im_size):
'crop_h': crop_pair[1],
'offset_w': w_offset,
'offset_h': h_offset
- }
-
+ }
+
return crop_info
-
+
crop_info = _sample_crop_size(im_size)
crop_w = crop_info['crop_w']
crop_h = crop_info['crop_h']
@@ -355,4 +364,3 @@ def group_scale(imgs, target_size):
resized_imgs.append(img.resize((ow, oh), Image.BILINEAR))
return resized_imgs
-
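Most hunks in this reader are mechanical reflows; the load-bearing call is `paddle.reader.xmap_readers(mapper, reader, num_threads, buf_size, order=True)`, which transforms samples in worker threads while preserving output order. A simplified order-preserving sketch of the same idea using only the standard library (no `buf_size` backpressure; not Paddle's implementation):

```python
import concurrent.futures

def xmap_ordered(mapper, reader, num_threads):
    """Apply `mapper` to items from `reader` in a thread pool,
    yielding results in the original input order (sketch)."""
    def gen():
        with concurrent.futures.ThreadPoolExecutor(num_threads) as pool:
            # executor.map preserves input order, like order=True above.
            for out in pool.map(mapper, reader()):
                yield out
    return gen

# Usage sketch: square numbers in 4 threads, order preserved.
reader = lambda: iter(range(8))
for v in xmap_ordered(lambda x: x * x, reader, 4)():
    print(v)  # 0 1 4 9 ...
```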
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/config_utils.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/config_utils.py
old mode 100644
new mode 100755
index e5db92b0d..6bfcbaadc
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/config_utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/config_utils.py
@@ -29,10 +29,12 @@
'infer',
]
+
class AttrDict(dict):
"""
AttrDict
"""
+
def __getattr__(self, key):
return self[key]
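`AttrDict` here is the usual attribute-access dict: `__getattr__` forwards to item lookup. A short usage sketch (the `__setattr__` half is assumed from the common pattern; only `__getattr__` is visible in this hunk):

```python
class AttrDict(dict):
    """dict whose keys are also readable/writable as attributes."""

    def __getattr__(self, key):
        return self[key]

    def __setattr__(self, key, value):  # assumed counterpart
        self[key] = value

cfg = AttrDict()
cfg.batch_size = 32                 # same as cfg['batch_size'] = 32
assert cfg['batch_size'] == cfg.batch_size == 32
```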
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/preprocess.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/preprocess.py
old mode 100644
new mode 100755
index d14aaf1ee..1451df1fe
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/preprocess.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/preprocess.py
@@ -9,7 +9,8 @@ def ffmpeg_frames(mp4_addr, frame_out_folder, fps=5):
if os.path.exists(frame_out_folder):
shutil.rmtree(frame_out_folder)
os.makedirs(frame_out_folder)
- cmd = './src/utils/ffmpeg -v 0 -i %s -r %d -q 0 %s/%s.jpg' % (mp4_addr, fps, frame_out_folder, '%08d')
+ cmd = './src/utils/ffmpeg -v 0 -i %s -r %d -q 0 %s/%s.jpg' % (
+ mp4_addr, fps, frame_out_folder, '%08d')
os.system(cmd)
@@ -23,7 +24,7 @@ def ffmpeg_pcm(mp4_addr, save_file_name):
def ffmpeg_mp4(mp4_url, mp4_addr):
"""ffmpeg_mp4"""
cmd = "wget %s -O %s -q" % (mp4_url, mp4_addr)
- print ("cmd = ", cmd)
+ print("cmd = ", cmd)
os.system(cmd)
@@ -33,4 +34,3 @@ def get_images(image_path):
images = images
images_path_list = [image_path + '/' + im for im in images]
return images_path_list
-
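The reflowed command shells out to a bundled ffmpeg binary to dump `fps` JPEG frames per second. A hedged equivalent using `subprocess.run` with the same flags (this sketch assumes an `ffmpeg` on PATH rather than the repo's `./src/utils/ffmpeg`):

```python
import os
import shutil
import subprocess

def ffmpeg_frames(mp4_addr, frame_out_folder, fps=5):
    """Dump `fps` JPEG frames per second, numbered 00000001.jpg, ..."""
    if os.path.exists(frame_out_folder):
        shutil.rmtree(frame_out_folder)
    os.makedirs(frame_out_folder)
    subprocess.run([
        'ffmpeg', '-v', '0',     # quiet logging
        '-i', mp4_addr,          # input video
        '-r', str(fps),          # output frame rate
        '-q', '0',               # JPEG quality scale, as in the hunk
        os.path.join(frame_out_folder, '%08d.jpg'),
    ], check=True)
```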
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/process_result.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/process_result.py
old mode 100644
new mode 100755
index 164869696..740821fb1
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/process_result.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/action_detect/utils/process_result.py
@@ -32,7 +32,10 @@ def get_data_res(label_map, data, topk):
labelid_top = data[i][2][k]
label_iou = data[i][3]
labelname_top = label_map[str(labelid_top)]
- video_result.append([feature_start_id, feature_end_id, labelid_top, labelname_top, score_top, label_iou])
+ video_result.append([
+ feature_start_id, feature_end_id, labelid_top, labelname_top,
+ score_top, label_iou
+ ])
return video_result
@@ -63,14 +66,18 @@ def base_nms(bboxes, thresh, delta=0, nms_id=2):
tt1 = np.maximum(t1[i], t1[order[1:]])
tt2 = np.minimum(t2[i], t2[order[1:]])
intersection = tt2 - tt1
- IoU = intersection / (durations[i] + durations[order[1:]] - intersection).astype(float)
+ IoU = intersection / (
+ durations[i] + durations[order[1:]] - intersection).astype(float)
inds = np.where(IoU <= thresh)[0]
order = order[inds + 1]
return [bboxes[i] for i in keep]
-def process_proposal(source_prop_box, min_frame_thread=5, nms_thresh=0.7, score_thresh=0.01):
+def process_proposal(source_prop_box,
+ min_frame_thread=5,
+ nms_thresh=0.7,
+ score_thresh=0.01):
"""process_video_prop"""
prop_box = []
for items in source_prop_box:
@@ -122,12 +129,14 @@ def process_video_classify(video_prop, fps, score_thread, iou_thread, \
label_classify_score = item[4]
label_iou_score = item[5]
if label_classify_score > score_thread and label_iou_score > iou_thread:
- video_results.append({"start_time": start_time,
- "end_time": end_time,
- "label_id": label_id,
- "label_name": label_name,
- "classify_score": label_classify_score,
- "iou_score": label_iou_score})
+ video_results.append({
+ "start_time": start_time,
+ "end_time": end_time,
+ "label_id": label_id,
+ "label_name": label_name,
+ "classify_score": label_classify_score,
+ "iou_score": label_iou_score
+ })
return video_results
@@ -139,6 +148,8 @@ def get_action_result(result_info, label_map_file, fps, score_thread=0, \
label_map = json.load(open(label_map_file, 'r', encoding='utf-8'))
org_result = get_data_res(label_map, result_info, topk)
- nms_result = process_video_classify(org_result, fps, score_thread, iou_thread, nms_id, nms_thread, frame_offset)
+ nms_result = process_video_classify(org_result, fps, score_thread,
+ iou_thread, nms_id, nms_thread,
+ frame_offset)
return nms_result
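`base_nms` above is temporal (1-D) non-maximum suppression: proposals are visited in score order, and any later proposal whose temporal IoU with a kept one exceeds `thresh` is dropped. A self-contained sketch over `(start, end, score)` tuples, a simplified stand-in for the repo's box format, with a clamp added for non-overlapping segments:

```python
import numpy as np

def temporal_nms(segments, thresh):
    """segments: list of (start, end, score); returns kept segments."""
    t1 = np.array([s[0] for s in segments], dtype=float)
    t2 = np.array([s[1] for s in segments], dtype=float)
    scores = np.array([s[2] for s in segments], dtype=float)
    durations = t2 - t1
    order = scores.argsort()[::-1]          # best score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        tt1 = np.maximum(t1[i], t1[order[1:]])
        tt2 = np.minimum(t2[i], t2[order[1:]])
        inter = np.maximum(0.0, tt2 - tt1)
        iou = inter / (durations[i] + durations[order[1:]] - inter)
        order = order[np.where(iou <= thresh)[0] + 1]
    return [segments[i] for i in keep]

print(temporal_nms([(0, 10, 0.9), (1, 9, 0.8), (20, 30, 0.7)], 0.7))
# -> [(0, 10, 0.9), (20, 30, 0.7)]  (the overlapping middle one is dropped)
```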
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/configs/configs.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/configs/configs.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/configs/index_label_football_8.json b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/configs/index_label_football_8.json
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/eval.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/eval.py
old mode 100644
new mode 100755
index f9117f514..2460eb1aa
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/eval.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/eval.py
@@ -10,18 +10,21 @@
import numpy as np
import io
-sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding = 'utf-8')
+sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
dataset = "/home/PaddleVideo/applications/FootballAction/datasets"
label_index_file = './configs/index_label_football_8.json'
eval_datasets = ['EuroCup2016']
-label_files = {'train': 'label_cls8_train.json',
- 'validation': 'label_cls8_val.json'}
+label_files = {
+ 'train': 'label_cls8_train.json',
+ 'validation': 'label_cls8_val.json'
+}
global fps, mode
label_index = json.load(open(label_index_file, 'rb'))
+
def load_gts():
global fps
gts_data = {'fps': 0, 'gts': {}}
@@ -33,10 +36,11 @@ def load_gts():
fps = gts['fps']
for gt in gts['gts']:
gt['mode'] = item
- basename = '{}/{}/mp4/{}'.format(dataset, eval_data, os.path.basename(gt['url']))
+ basename = '{}/{}/mp4/{}'.format(dataset, eval_data,
+ os.path.basename(gt['url']))
gts_data['gts'][basename] = gt
return gts_data['gts']
-
+
def computeIoU(e1, e2):
"""
@@ -49,65 +53,86 @@ def computeIoU(e1, e2):
x1 = np.maximum(e1["start"], e2["start"])
x2 = np.minimum(e1["end"], e2["end"])
inter = np.maximum(0.0, x2 - x1)
- iou = 0.0 if (area1 + area2 - inter) == 0 else inter * 1.0 / (area1 + area2 - inter)
+ iou = 0.0 if (area1 + area2 - inter) == 0 else inter * 1.0 / (
+ area1 + area2 - inter)
if not mode == 'proposal':
iou = 0.0 if area2 == 0 else inter * 1.0 / area2
return iou
def convert_proposal(boxes, basename, score_threshold=0.01):
- boxes = sorted(boxes, key=lambda x:float(x['score']), reverse=True)
+ boxes = sorted(boxes, key=lambda x: float(x['score']), reverse=True)
res = []
for box in boxes:
if not float(box['score']) >= score_threshold:
continue
- res.append({'basename': basename,
- 'start': int(float(box['start']) / fps),
- 'end': int(float(box['end']) / fps),
- 'label': 0})
+ res.append({
+ 'basename': basename,
+ 'start': int(float(box['start']) / fps),
+ 'end': int(float(box['end']) / fps),
+ 'label': 0
+ })
return res
+
def convert_classify(boxes, basename, iou_threshold, score_threshold):
- boxes = sorted(boxes, key=lambda x:(float(x['classify_score']), float(x['iou_score'])), reverse=True)
+ boxes = sorted(
+ boxes,
+ key=lambda x: (float(x['classify_score']), float(x['iou_score'])),
+ reverse=True)
+
def convert_time_to_frame(time_type):
return int(time_type)
h, m, s = time_type.split(':')
return int(h) * 3600 + int(m) * 60 + int(s)
+
res = []
for box in boxes:
if not (box['iou_score'] >= iou_threshold and
box['classify_score'] >= score_threshold):
continue
- res.append({'basename': basename,
- 'start': convert_time_to_frame(box['start_time']),
- 'end': convert_time_to_frame(box['end_time']),
- 'label': box['label_id']})
+ res.append({
+ 'basename': basename,
+ 'start': convert_time_to_frame(box['start_time']),
+ 'end': convert_time_to_frame(box['end_time']),
+ 'label': box['label_id']
+ })
return res
-
+
+
def convert_groundtruth(boxes, basename, phase=None):
res = []
for box in boxes:
for item in box['label_ids']:
label = 0 if phase == 'proposal' else item
- res.append({'basename': basename,
- 'start': box['start_id'],
- 'end': box['end_id'],
- 'label': label})
+ res.append({
+ 'basename': basename,
+ 'start': box['start_id'],
+ 'end': box['end_id'],
+ 'label': label
+ })
return res
+
+
def print_head(iou):
print("\nioa = {:.1f}".format(iou))
res_str = ''
for item in ['label_name']:
res_str += '{:<12s}'.format(item)
- for item in ['label_id', 'precision', 'recall', 'hit_prop', 'num_prop', 'hit_gts', 'num_gts']:
+ for item in [
+ 'label_id', 'precision', 'recall', 'hit_prop', 'num_prop',
+ 'hit_gts', 'num_gts'
+ ]:
res_str += '{:<10s}'.format(item)
print(res_str)
+
def print_result(res_dict, label='avg'):
if label == 'avg':
res_str = '{:<22s}'.format(str(label))
else:
- res_str = '{0:{2}<6s}{1:<10s}'.format(label_index[str(label)], str(label), chr(12288))
+ res_str = '{0:{2}<6s}{1:<10s}'.format(label_index[str(label)],
+ str(label), chr(12288))
for item in ['prec', 'recall']:
res_str += '{:<10.4f}'.format(res_dict[item])
@@ -115,7 +140,8 @@ def print_result(res_dict, label='avg'):
res_str += '{:<10d}'.format(res_dict[item])
print(res_str)
-def evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = False):
+
+def evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub=False):
iou_map = [computeIoU(resId, gtsId) for resId in res_boxes \
for gtsId in gts_boxes]
iou_map = np.array(iou_map).reshape((len(res_boxes), len(gts_boxes)))
@@ -127,7 +153,7 @@ def evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = False):
for iou_threshold in iou_range:
if show_sub:
print_head(iou_threshold)
-
+
iou_prop = np.array([k >= iou_threshold for k in hit_map_prop_total])
average_results = {}
for label_id in label_range:
@@ -141,7 +167,8 @@ def evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = False):
else:
hit_prop_index = label_prop & iou_prop
sub_results['hit_prop'] = sum(hit_prop_index)
- sub_results['hit_gts'] = len(set(hit_map_index_total[hit_prop_index]))
+ sub_results['hit_gts'] = len(
+ set(hit_map_index_total[hit_prop_index]))
sub_results['prec'] = 0.0 if sub_results['num_prop'] == 0 \
else sub_results['hit_prop'] * 1.0 / sub_results['num_prop']
@@ -153,7 +180,7 @@ def evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = False):
if not item in average_results:
average_results[item] = 0
average_results[item] += sub_results[item]
- if len(label_range) == 1: # proposal 不需要输出average值
+ if len(label_range) == 1:  # proposal does not need averaged output
continue
average_results['prec'] = 0.0 if average_results['num_prop'] == 0 \
else average_results['hit_prop'] * 1.0 / average_results['num_prop']
@@ -167,7 +194,13 @@ def evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = False):
(average_results['prec'] + average_results['recall'])
return average_results
-def get_eval_results(predicts, gts_data, phase, iou_threshold = 0.3, score_threshold = 0.3, show_sub = False):
+
+def get_eval_results(predicts,
+ gts_data,
+ phase,
+ iou_threshold=0.3,
+ score_threshold=0.3,
+ show_sub=False):
global mode
mode = phase
res_boxes = []
@@ -186,29 +219,35 @@ def get_eval_results(predicts, gts_data, phase, iou_threshold = 0.3, score_thres
gts = gts_data[basename]['actions']
if phase == 'proposal':
- res_boxes.extend(convert_proposal(ped_data['bmn_results'], basename, score_threshold))
- gts_boxes.extend(convert_groundtruth(gts, basename, phase='proposal'))
+ res_boxes.extend(
+ convert_proposal(ped_data['bmn_results'], basename,
+ score_threshold))
+ gts_boxes.extend(
+ convert_groundtruth(
+ gts, basename, phase='proposal'))
label_range = [0]
iou_range = np.arange(0.1, 1, 0.1)
else:
- res_boxes.extend(convert_classify(ped_data['action_results'], basename, iou_threshold, score_threshold))
+ res_boxes.extend(
+ convert_classify(ped_data['action_results'], basename,
+ iou_threshold, score_threshold))
gts_boxes.extend(convert_groundtruth(gts, basename))
label_range = range(1, len(label_index))
iou_range = np.arange(0.5, 0.6, 0.1)
-
- eval_results = evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = show_sub)
-
+
+ eval_results = evaluation(
+ res_boxes, gts_boxes, label_range, iou_range, show_sub=show_sub)
+
return eval_results
-
+
if __name__ == "__main__":
result_file = sys.argv[1]
predicts = json.load(open(result_file, 'r', encoding='utf-8'))
gts_data = load_gts()
- get_eval_results(predicts, gts_data, 'proposal',
- score_threshold = 0.03,
- show_sub = True)
+ get_eval_results(
+ predicts, gts_data, 'proposal', score_threshold=0.03, show_sub=True)
#get_eval_results(predicts, gts_data, 'actions')
best_F1 = -0.1
@@ -217,10 +256,13 @@ def get_eval_results(predicts, gts_data, phase, iou_threshold = 0.3, score_thres
best_score_threshold = 0.
for iou_threshold in np.arange(0.1, 0.9, 0.1):
for score_threshold in np.arange(0.1, 1, 0.1):
- avg_res = get_eval_results(predicts, gts_data, 'actions',
- iou_threshold = iou_threshold,
- score_threshold = score_threshold,
- show_sub = False)
+ avg_res = get_eval_results(
+ predicts,
+ gts_data,
+ 'actions',
+ iou_threshold=iou_threshold,
+ score_threshold=score_threshold,
+ show_sub=False)
if best_F1 < avg_res['F1']:
best_F1 = avg_res['F1']
best_res = avg_res
@@ -232,8 +274,10 @@ def get_eval_results(predicts, gts_data, phase, iou_threshold = 0.3, score_thres
print_head(0.5)
print_result(best_res)
- get_eval_results(predicts, gts_data, 'actions', iou_threshold = best_iou_threshold,
- score_threshold = best_score_threshold,
- show_sub = True)
-
-
+ get_eval_results(
+ predicts,
+ gts_data,
+ 'actions',
+ iou_threshold=best_iou_threshold,
+ score_threshold=best_score_threshold,
+ show_sub=True)
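The reformatted `__main__` block sweeps IoU and score thresholds and keeps the pair with the best F1. A compact sketch of that selection loop, with a hypothetical `evaluate(iou_t, score_t)` standing in for `get_eval_results`:

```python
import numpy as np

def pick_best_thresholds(evaluate):
    """evaluate(iou_t, score_t) -> dict containing at least 'F1'."""
    best = {'F1': -0.1, 'iou': None, 'score': None, 'res': None}
    for iou_t in np.arange(0.1, 0.9, 0.1):
        for score_t in np.arange(0.1, 1.0, 0.1):
            res = evaluate(iou_t, score_t)
            if res['F1'] > best['F1']:
                best.update(F1=res['F1'], iou=iou_t, score=score_t, res=res)
    return best

# Usage sketch with a toy metric surface peaking near (0.5, 0.3).
toy = lambda i, s: {'F1': 1.0 - abs(i - 0.5) - abs(s - 0.3)}
print(pick_best_thresholds(toy)['iou'])  # ~0.5
```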
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/predict.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/predict.py
old mode 100644
new mode 100755
index 797ae3b5a..a35d61c68
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/predict.py
@@ -1,4 +1,3 @@
-
import os
import sys
import json
@@ -8,7 +7,7 @@
if __name__ == '__main__':
dataset_dir = "/home/PaddleVideo/applications/FootballAction/datasets/EuroCup2016"
-
+
model_predict = ActionDetection(cfg_file="./configs/configs.yaml")
model_predict.load_model()
@@ -26,10 +25,12 @@
pcm_path = video_name.replace(".mp4", ".pcm").replace("mp4", "pcm")
bmn_results, action_results = model_predict.infer(imgs_path, pcm_path)
- results.append({'video_name': line,
- 'bmn_results': bmn_results,
- 'action_results': action_results})
+ results.append({
+ 'video_name': line,
+ 'bmn_results': bmn_results,
+ 'action_results': action_results
+ })
with open('results.json', 'w', encoding='utf-8') as f:
- data = json.dumps(results, indent=4, ensure_ascii=False)
- f.write(data)
+ data = json.dumps(results, indent=4, ensure_ascii=False)
+ f.write(data)
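After the reflow, `predict.py` still accumulates one record per video and writes UTF-8 JSON. A sketch of the record shape implied by the fields `eval.py` reads back from this file (the values are illustrative):

```python
import json

results = [{
    'video_name': 'EuroCup2016/mp4/example.mp4',   # illustrative path
    'bmn_results': [{'start': 150, 'end': 175, 'score': 0.92}],
    'action_results': [{'start_time': '0:00:30', 'end_time': '0:00:35',
                        'label_id': 3, 'label_name': 'goal',
                        'classify_score': 0.88, 'iou_score': 0.71}],
}]

with open('results.json', 'w', encoding='utf-8') as f:
    # ensure_ascii=False keeps non-ASCII label names readable in the file.
    f.write(json.dumps(results, indent=4, ensure_ascii=False))
```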
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/results.json b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/predict/results.json
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/run.sh b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/run.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/temp.json b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/temp.json
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/conf/conf.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/conf/conf.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/conf/conf.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/conf/conf.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/inference_model.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/inference_model.py
old mode 100644
new mode 100755
index ae734d182..20c0f95a0
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/inference_model.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/inference_model.py
@@ -79,9 +79,8 @@ def save_inference_model(args):
infer_config = merge_configs(config, 'infer', vars(args))
print_configs(infer_config, "Infer")
#infer_model = models.get_model(args.model_name, infer_config, mode='infer')
- infer_model = action_net.ActionNet(args.model_name,
- infer_config,
- mode='infer')
+ infer_model = action_net.ActionNet(
+ args.model_name, infer_config, mode='infer')
infer_model.build_input(use_pyreader=False)
infer_model.build_model()
infer_feeds = infer_model.feeds()
@@ -93,12 +92,13 @@ def save_inference_model(args):
if args.weights:
assert os.path.exists(
- args.weights), "Given weight dir {} not exist.".format(args.weights)
+ args.weights), "Given weight dir {} not exist.".format(
+ args.weights)
# if no weight files specified, download weights from paddle
weights = args.weights or infer_model.get_weights()
infer_model.load_test_weights_file(exe, weights,
- fluid.default_main_program(), place)
+ fluid.default_main_program(), place)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
@@ -123,4 +123,4 @@ def save_inference_model(args):
logger.info(args)
- save_inference_model(args)
\ No newline at end of file
+ save_inference_model(args)
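The export step at the end of `save_inference_model` falls outside this hunk; in Paddle 1.x the usual call is `fluid.io.save_inference_model`. A hedged sketch of that final step (the variable names are assumptions, not lines from this file):

```python
import paddle.fluid as fluid

def export(exe, save_dir, infer_feeds, infer_outputs):
    # Persist the inference program plus parameters so predict-time code
    # can restore it with fluid.io.load_inference_model.
    fluid.io.save_inference_model(
        dirname=save_dir,
        feeded_var_names=[v.name for v in infer_feeds],
        target_vars=infer_outputs,
        executor=exe,
        main_program=fluid.default_main_program())
```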
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/accuracy_metrics.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/accuracy_metrics.py
old mode 100644
new mode 100755
index 43298be16..0aa956613
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/accuracy_metrics.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/accuracy_metrics.py
@@ -26,6 +26,7 @@
class MetricsCalculator():
"""MetricsCalculator"""
+
def __init__(self, name, mode, metrics_args):
"""init"""
self.name = name
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/action_net.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/action_net.py
old mode 100644
new mode 100755
index 2de3b00fc..045721d3e
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/action_net.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/action_net.py
@@ -20,6 +20,7 @@
logger = logging.getLogger('LSTM')
+
def is_parameter(var):
"""is_parameter"""
return isinstance(var, fluid.framework.Parameter)
@@ -55,8 +56,8 @@ def get_config(self):
self.num_gpus = self.get_config_from_sec(self.mode, 'num_gpus', 1)
if self.mode == 'train':
- self.learning_rate = self.get_config_from_sec('train',
- 'learning_rate', 1e-3)
+ self.learning_rate = self.get_config_from_sec(
+ 'train', 'learning_rate', 1e-3)
self.weight_decay = self.get_config_from_sec('train',
'weight_decay', 8e-4)
self.num_samples = self.get_config_from_sec('train', 'num_samples',
@@ -65,7 +66,8 @@ def get_config(self):
'decay_epochs', [5])
self.decay_gamma = self.get_config_from_sec('train', 'decay_gamma',
0.1)
- self.droplast = self.get_config_from_sec('train', 'droplast', False)
+ self.droplast = self.get_config_from_sec('train', 'droplast',
+ False)
def get_config_from_sec(self, sec, item, default=None):
"""get_config_from_sec"""
@@ -82,11 +84,13 @@ def load_pretrain_params_file(self, exe, pretrain, prog, place):
load_vars = [x for x in prog.list_vars() \
if isinstance(x, fluid.framework.Parameter) and x.name.find('fc_8') == -1]
- fluid.io.load_vars(exe, dirname=pretrain, vars=load_vars, filename="param")
+ fluid.io.load_vars(
+ exe, dirname=pretrain, vars=load_vars, filename="param")
def load_test_weights_file(self, exe, weights, prog, place):
params_list = list(filter(is_parameter, prog.list_vars()))
fluid.load(prog, weights, executor=exe, var_list=params_list)
+
#def load_test_weights_file(self, exe, weights, prog, place):
# """load_test_weights_file"""
# load_vars = [x for x in prog.list_vars() \
@@ -97,7 +101,6 @@ def epoch_num(self):
"""get train epoch num"""
return self.cfg.TRAIN.epoch
-
def build_input(self, use_pyreader):
"""build_input"""
self.feature_input = []
@@ -117,11 +120,11 @@ def build_input(self, use_pyreader):
assert self.mode != 'infer', \
'pyreader is not recommendated when infer, please set use_pyreader to be false.'
self.py_reader = fluid.io.PyReader(
- feed_list=self.feature_input + [self.label_id_input] + [self.label_iou_input],
+ feed_list=self.feature_input + [self.label_id_input] +
+ [self.label_iou_input],
capacity=1024,
iterable=True)
-
def build_model(self):
"""build_model"""
# ---------------- transfer from old paddle ---------------
@@ -134,8 +137,11 @@ def build_model(self):
bias_attr=ParamAttr(
regularizer=fluid.regularizer.L2Decay(0.0),
initializer=fluid.initializer.NormalInitializer(scale=0.0)))
- lstm_forward, _ = fluid.layers.dynamic_lstm(input=lstm_forward_fc, size=self.lstm_size_img * 4, is_reverse=False,
- use_peepholes=True)
+ lstm_forward, _ = fluid.layers.dynamic_lstm(
+ input=lstm_forward_fc,
+ size=self.lstm_size_img * 4,
+ is_reverse=False,
+ use_peepholes=True)
#lstm_forward_add = fluid.layers.elementwise_add(self.feature_input[0], lstm_forward, act='relu')
#print("lstm_backward_add.shape", lstm_forward_add.shape)
@@ -146,24 +152,30 @@ def build_model(self):
bias_attr=ParamAttr(
regularizer=fluid.regularizer.L2Decay(0.0),
initializer=fluid.initializer.NormalInitializer(scale=0.0)))
- lstm_backward, _ = fluid.layers.dynamic_lstm(input=lstm_backward_fc, size=self.lstm_size_img * 4, is_reverse=True,
- use_peepholes=True)
+ lstm_backward, _ = fluid.layers.dynamic_lstm(
+ input=lstm_backward_fc,
+ size=self.lstm_size_img * 4,
+ is_reverse=True,
+ use_peepholes=True)
#lstm_backward_add = fluid.layers.elementwise_add(self.feature_input[0], lstm_backward, act='relu')
#print("lstm_backward_add.shape", lstm_backward_add.shape)
#lstm_img = fluid.layers.concat(input=[lstm_forward_add, lstm_backward_add], axis=1)
- lstm_img = fluid.layers.concat(input=[lstm_forward, lstm_backward], axis=1)
+ lstm_img = fluid.layers.concat(
+ input=[lstm_forward, lstm_backward], axis=1)
print("lstm_img.shape", lstm_img.shape)
- lstm_dropout = fluid.layers.dropout(x=lstm_img, dropout_prob=self.drop_rate,
- is_test=(not self.mode == 'train'))
- lstm_weight = fluid.layers.fc(
- input=lstm_dropout,
- size=1,
- act='sequence_softmax',
- bias_attr=None)
+ lstm_dropout = fluid.layers.dropout(
+ x=lstm_img,
+ dropout_prob=self.drop_rate,
+ is_test=(not self.mode == 'train'))
+ lstm_weight = fluid.layers.fc(input=lstm_dropout,
+ size=1,
+ act='sequence_softmax',
+ bias_attr=None)
- scaled = fluid.layers.elementwise_mul(x=lstm_dropout, y=lstm_weight, axis=0)
+ scaled = fluid.layers.elementwise_mul(
+ x=lstm_dropout, y=lstm_weight, axis=0)
lstm_pool = fluid.layers.sequence_pool(input=scaled, pool_type='sum')
# ------audio------
lstm_forward_fc_audio = fluid.layers.fc(
@@ -174,7 +186,10 @@ def build_model(self):
regularizer=fluid.regularizer.L2Decay(0.0),
initializer=fluid.initializer.NormalInitializer(scale=0.0)))
lstm_forward_audio, _ = fluid.layers.dynamic_lstm(
- input=lstm_forward_fc_audio, size=self.lstm_size_audio * 4, is_reverse=False, use_peepholes=True)
+ input=lstm_forward_fc_audio,
+ size=self.lstm_size_audio * 4,
+ is_reverse=False,
+ use_peepholes=True)
lsmt_backward_fc_audio = fluid.layers.fc(
input=self.feature_input[1],
@@ -183,23 +198,31 @@ def build_model(self):
bias_attr=ParamAttr(
regularizer=fluid.regularizer.L2Decay(0.0),
initializer=fluid.initializer.NormalInitializer(scale=0.0)))
- lstm_backward_audio, _ = fluid.layers.dynamic_lstm(input=lsmt_backward_fc_audio, size=self.lstm_size_audio * 4,
- is_reverse=True, use_peepholes=True)
-
- lstm_forward_audio = fluid.layers.concat(input=[lstm_forward_audio, lstm_backward_audio], axis=1)
+ lstm_backward_audio, _ = fluid.layers.dynamic_lstm(
+ input=lsmt_backward_fc_audio,
+ size=self.lstm_size_audio * 4,
+ is_reverse=True,
+ use_peepholes=True)
- lstm_dropout_audio = fluid.layers.dropout(x=lstm_forward_audio, dropout_prob=self.drop_rate,
- is_test=(not self.mode == 'train'))
- lstm_weight_audio = fluid.layers.fc(
- input=lstm_dropout_audio,
- size=1,
- act='sequence_softmax',
- bias_attr=None)
+ lstm_forward_audio = fluid.layers.concat(
+ input=[lstm_forward_audio, lstm_backward_audio], axis=1)
- scaled_audio = fluid.layers.elementwise_mul(x=lstm_dropout_audio, y=lstm_weight_audio, axis=0)
- lstm_pool_audio = fluid.layers.sequence_pool(input=scaled_audio, pool_type='sum')
+ lstm_dropout_audio = fluid.layers.dropout(
+ x=lstm_forward_audio,
+ dropout_prob=self.drop_rate,
+ is_test=(not self.mode == 'train'))
+ lstm_weight_audio = fluid.layers.fc(input=lstm_dropout_audio,
+ size=1,
+ act='sequence_softmax',
+ bias_attr=None)
+
+ scaled_audio = fluid.layers.elementwise_mul(
+ x=lstm_dropout_audio, y=lstm_weight_audio, axis=0)
+ lstm_pool_audio = fluid.layers.sequence_pool(
+ input=scaled_audio, pool_type='sum')
# ------ concat -------
- lstm_concat = fluid.layers.concat(input=[lstm_pool, lstm_pool_audio], axis=1)
+ lstm_concat = fluid.layers.concat(
+ input=[lstm_pool, lstm_pool_audio], axis=1)
#print("lstm_concat.shape", lstm_concat.shape)
input_fc_proj = fluid.layers.fc(
@@ -210,12 +233,16 @@ def build_model(self):
bias_attr=ParamAttr(
regularizer=fluid.regularizer.L2Decay(0.0),
initializer=fluid.initializer.NormalInitializer(scale=0.0)))
- input_fc_proj_bn = fluid.layers.batch_norm(input=input_fc_proj, act="relu",
+ input_fc_proj_bn = fluid.layers.batch_norm(
+ input=input_fc_proj,
+ act="relu",
is_test=(not self.mode == 'train'))
# model remove bn when batch_size is small
if not self.with_bn:
input_fc_proj_bn = 0 * input_fc_proj_bn + input_fc_proj
- input_fc_proj_dropout = fluid.layers.dropout(x=input_fc_proj_bn, dropout_prob=self.drop_rate,
+ input_fc_proj_dropout = fluid.layers.dropout(
+ x=input_fc_proj_bn,
+ dropout_prob=self.drop_rate,
is_test=(not self.mode == 'train'))
input_fc_hidden = fluid.layers.fc(
@@ -225,23 +252,25 @@ def build_model(self):
bias_attr=ParamAttr(
regularizer=fluid.regularizer.L2Decay(0.0),
initializer=fluid.initializer.NormalInitializer(scale=0.0)))
- input_fc_hidden_bn = fluid.layers.batch_norm(input=input_fc_hidden, act="relu",
+ input_fc_hidden_bn = fluid.layers.batch_norm(
+ input=input_fc_hidden,
+ act="relu",
is_test=(not self.mode == 'train'))
# model remove bn when batch_size is small
if not self.with_bn:
input_fc_hidden_bn = 0 * input_fc_hidden_bn + input_fc_hidden
- input_fc_hidden_dropout = fluid.layers.dropout(x=input_fc_hidden_bn, dropout_prob=self.drop_rate,
+ input_fc_hidden_dropout = fluid.layers.dropout(
+ x=input_fc_hidden_bn,
+ dropout_prob=self.drop_rate,
is_test=(not self.mode == 'train'))
- self.fc = fluid.layers.fc(
- input=input_fc_hidden_dropout,
- size=self.num_classes,
- act='softmax')
- self.fc_iou = fluid.layers.fc(
- input=input_fc_hidden_dropout,
- size=1,
- act="sigmoid")
+ self.fc = fluid.layers.fc(input=input_fc_hidden_dropout,
+ size=self.num_classes,
+ act='softmax')
+ self.fc_iou = fluid.layers.fc(input=input_fc_hidden_dropout,
+ size=1,
+ act="sigmoid")
self.network_outputs = [self.fc, self.fc_iou]
-
+
def optimizer(self):
"""optimizer"""
assert self.mode == 'train', "optimizer only can be get in train mode"
@@ -250,11 +279,17 @@ def optimizer(self):
for i in range(len(self.decay_epochs) + 1)
]
if self.droplast:
- self.num_samples = math.floor(float(self.num_samples) / float(self.batch_size)) * self.batch_size
- iter_per_epoch = math.floor(float(self.num_samples) / self.batch_size)
+ self.num_samples = math.floor(
+ float(self.num_samples) /
+ float(self.batch_size)) * self.batch_size
+ iter_per_epoch = math.floor(
+ float(self.num_samples) / self.batch_size)
else:
- self.num_samples = math.ceil(float(self.num_samples) / float(self.batch_size)) * self.batch_size
- iter_per_epoch = math.ceil(float(self.num_samples) / self.batch_size)
+ self.num_samples = math.ceil(
+ float(self.num_samples) /
+ float(self.batch_size)) * self.batch_size
+ iter_per_epoch = math.ceil(
+ float(self.num_samples) / self.batch_size)
boundaries = [e * iter_per_epoch for e in self.decay_epochs]
logger.info("num_sample = {}, batchsize = {}, iter_per_epoch = {}, lr_int = {}, boundaries = {} "
@@ -265,9 +300,14 @@ def optimizer(self):
learning_rate=fluid.layers.piecewise_decay(
values=values, boundaries=boundaries),
centered=True,
- regularization=fluid.regularizer.L2Decay(regularization_coeff=self.weight_decay))
-
- def _calc_label_smoothing_loss(self, softmax_out, label, class_dim, epsilon=0.1):
+ regularization=fluid.regularizer.L2Decay(
+ regularization_coeff=self.weight_decay))
+
+ def _calc_label_smoothing_loss(self,
+ softmax_out,
+ label,
+ class_dim,
+ epsilon=0.1):
"""Calculate label smoothing loss
Returns:
label smoothing loss
@@ -284,19 +324,22 @@ def loss(self):
loss
"""
assert self.mode != 'infer', "invalid loss calculationg in infer mode"
- cost_cls = fluid.layers.cross_entropy(input=self.network_outputs[0], label=self.label_id_input)
+ cost_cls = fluid.layers.cross_entropy(
+ input=self.network_outputs[0], label=self.label_id_input)
cost_cls = fluid.layers.reduce_sum(cost_cls, dim=-1)
sum_cost_cls = fluid.layers.reduce_sum(cost_cls)
- self.loss_cls_ = fluid.layers.scale(sum_cost_cls, scale=self.num_gpus, bias_after_scale=False)
- cost_iou = fluid.layers.square_error_cost(input=self.network_outputs[1], label=self.label_iou_input)
+ self.loss_cls_ = fluid.layers.scale(
+ sum_cost_cls, scale=self.num_gpus, bias_after_scale=False)
+ cost_iou = fluid.layers.square_error_cost(
+ input=self.network_outputs[1], label=self.label_iou_input)
cost_iou = fluid.layers.reduce_sum(cost_iou, dim=-1)
sum_cost_iou = fluid.layers.reduce_sum(cost_iou)
- self.loss_iou_ = fluid.layers.scale(sum_cost_iou, scale=self.num_gpus, bias_after_scale=False)
+ self.loss_iou_ = fluid.layers.scale(
+ sum_cost_iou, scale=self.num_gpus, bias_after_scale=False)
alpha = 10
self.loss_ = self.loss_cls_ + alpha * self.loss_iou_
return self.loss_
-
def outputs(self):
"""outputs"""
return self.network_outputs
@@ -306,7 +349,8 @@ def feeds(self):
feeds
"""
return self.feature_input if self.mode == 'infer' else self.feature_input + [
- self.label_id_input, self.label_iou_input]
+ self.label_id_input, self.label_iou_input
+ ]
def fetches(self):
"""fetches"""
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/config.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/config.py
old mode 100644
new mode 100755
index e43158408..45a68a11c
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/config.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/config.py
@@ -63,8 +63,8 @@ def merge_configs(cfg, sec, args_dict):
def print_configs(cfg, mode):
"""print_configs"""
logger = logging.getLogger('LSTM')
- logger.info(
- "---------------- {:>5} Arguments ----------------".format(mode))
+ logger.info("---------------- {:>5} Arguments ----------------".format(
+ mode))
for sec, sec_items in cfg.items():
logger.info("{}:".format(sec))
for k, v in sec_items.items():
@@ -74,6 +74,7 @@ def print_configs(cfg, mode):
class AttrDict(dict):
"""AttrDict"""
+
def __getattr__(self, key):
return self[key]
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/feature_reader.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/feature_reader.py
old mode 100644
new mode 100755
index 24862fe2f..09c45aca4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/feature_reader.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/feature_reader.py
@@ -40,6 +40,7 @@ class FeatureReader:
list
NextVlad only: eigen_file
"""
+
def __init__(self, name, mode, cfg, bs_denominator):
self.name = name
self.mode = mode
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/train.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/train.py
old mode 100644
new mode 100755
index abae594a9..59d0212cc
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/train.py
@@ -38,14 +38,16 @@
def parse_args():
"""parse_args"""
parser = argparse.ArgumentParser("Paddle Video train script")
- parser.add_argument('--model_name',
- type=str,
- default='BaiduNet',
- help='name of model to train.')
- parser.add_argument('--config',
- type=str,
- default='configs/conf.txt',
- help='path to config file of model')
+ parser.add_argument(
+ '--model_name',
+ type=str,
+ default='BaiduNet',
+ help='name of model to train.')
+ parser.add_argument(
+ '--config',
+ type=str,
+ default='configs/conf.txt',
+ help='path to config file of model')
parser.add_argument(
'--batch_size',
type=int,
@@ -60,8 +62,7 @@ def parse_args():
'--pretrain',
type=str,
default=None,
- help=
- 'path to pretrain weights. None to use default weights path in ~/.paddle/weights.'
+ help='path to pretrain weights. None to use default weights path in ~/.paddle/weights.'
)
parser.add_argument(
'--resume',
@@ -69,30 +70,36 @@ def parse_args():
default=None,
help='path to resume training based on previous checkpoints. '
'None for not resuming any checkpoints.')
- parser.add_argument('--use_gpu',
- type=ast.literal_eval,
- default=True,
- help='default use gpu.')
- parser.add_argument('--no_memory_optimize',
- action='store_true',
- default=False,
- help='whether to use memory optimize in train')
- parser.add_argument('--epoch_num',
- type=int,
- default=0,
- help='epoch number, 0 for read from config file')
- parser.add_argument('--valid_interval',
- type=int,
- default=1,
- help='validation epoch interval, 0 for no validation.')
- parser.add_argument('--save_dir',
- type=str,
- default='checkpoints',
- help='directory name to save train snapshoot')
- parser.add_argument('--log_interval',
- type=int,
- default=10,
- help='mini-batch interval to log.')
+ parser.add_argument(
+ '--use_gpu',
+ type=ast.literal_eval,
+ default=True,
+ help='default use gpu.')
+ parser.add_argument(
+ '--no_memory_optimize',
+ action='store_true',
+ default=False,
+ help='whether to use memory optimize in train')
+ parser.add_argument(
+ '--epoch_num',
+ type=int,
+ default=0,
+ help='epoch number, 0 for read from config file')
+ parser.add_argument(
+ '--valid_interval',
+ type=int,
+ default=1,
+ help='validation epoch interval, 0 for no validation.')
+ parser.add_argument(
+ '--save_dir',
+ type=str,
+ default='checkpoints',
+ help='directory name to save train snapshoot')
+ parser.add_argument(
+ '--log_interval',
+ type=int,
+ default=10,
+ help='mini-batch interval to log.')
args = parser.parse_args()
return args
@@ -140,8 +147,8 @@ def train(args):
# train_learning_rate_in = train_config.TRAIN.learning_rate
train_config.TRAIN.batch_size = min(
int(train_config.TRAIN.num_samples / 10), train_batch_size_in)
- train_config.TRAIN.batch_size = int(
- train_config.TRAIN.batch_size / bs_denominator) * bs_denominator
+ train_config.TRAIN.batch_size = int(train_config.TRAIN.batch_size /
+ bs_denominator) * bs_denominator
train_config.TRAIN.batch_size = max(train_config.TRAIN.batch_size,
bs_denominator)
# train_config.TRAIN.learning_rate = float(train_learning_rate_in) / float(train_batch_size_in) \
@@ -150,8 +157,8 @@ def train(args):
val_batch_size_in = valid_config.VALID.batch_size
valid_config.VALID.batch_size = min(
int(valid_config.VALID.num_samples / 10), val_batch_size_in)
- valid_config.VALID.batch_size = int(
- valid_config.VALID.batch_size / bs_denominator) * bs_denominator
+ valid_config.VALID.batch_size = int(valid_config.VALID.batch_size /
+ bs_denominator) * bs_denominator
valid_config.VALID.batch_size = max(valid_config.VALID.batch_size,
bs_denominator)
@@ -165,12 +172,10 @@ def train(args):
valid_config.MODEL.with_bn = True
config.print_configs(train_config, 'Train')
- train_model = action_net.ActionNet(args.model_name,
- train_config,
- mode='train')
- valid_model = action_net.ActionNet(args.model_name,
- valid_config,
- mode='valid')
+ train_model = action_net.ActionNet(
+ args.model_name, train_config, mode='train')
+ valid_model = action_net.ActionNet(
+ args.model_name, valid_config, mode='valid')
# build model
startup = fluid.Program()
@@ -216,10 +221,8 @@ def train(args):
def if_exist(var):
return os.path.exists(os.path.join(args.resume, var.name))
- fluid.io.load_vars(exe,
- args.resume,
- predicate=if_exist,
- main_program=train_prog)
+ fluid.io.load_vars(
+ exe, args.resume, predicate=if_exist, main_program=train_prog)
else:
# if not in resume mode, load pretrain weights
if args.pretrain:
@@ -234,25 +237,23 @@ def if_exist(var):
build_strategy.enable_inplace = True
compiled_train_prog = fluid.compiler.CompiledProgram(
- train_prog).with_data_parallel(loss_name=train_loss.name,
- build_strategy=build_strategy)
+ train_prog).with_data_parallel(
+ loss_name=train_loss.name, build_strategy=build_strategy)
compiled_valid_prog = fluid.compiler.CompiledProgram(
- valid_prog).with_data_parallel(share_vars_from=compiled_train_prog,
- build_strategy=build_strategy)
+ valid_prog).with_data_parallel(
+ share_vars_from=compiled_train_prog, build_strategy=build_strategy)
# get reader
train_config.TRAIN.batch_size = int(train_config.TRAIN.batch_size /
bs_denominator)
valid_config.VALID.batch_size = int(valid_config.VALID.batch_size /
bs_denominator)
print("config setting")
- train_dataload = feature_reader.FeatureReader(args.model_name.upper(),
- 'train', train_config,
- bs_denominator)
+ train_dataload = feature_reader.FeatureReader(
+ args.model_name.upper(), 'train', train_config, bs_denominator)
train_reader = train_dataload.create_reader()
print("train reader")
- valid_dataload = feature_reader.FeatureReader(args.model_name.upper(),
- 'valid', valid_config,
- bs_denominator)
+ valid_dataload = feature_reader.FeatureReader(
+ args.model_name.upper(), 'valid', valid_config, bs_denominator)
valid_reader = valid_dataload.create_reader()
# get metrics
@@ -265,10 +266,10 @@ def if_exist(var):
print("epoch is ", epochs)
exe_places = fluid.cuda_places() if args.use_gpu else fluid.cpu_places()
- train_pyreader.decorate_sample_list_generator(train_reader,
- places=exe_places)
- valid_pyreader.decorate_sample_list_generator(valid_reader,
- places=exe_places)
+ train_pyreader.decorate_sample_list_generator(
+ train_reader, places=exe_places)
+ valid_pyreader.decorate_sample_list_generator(
+ valid_reader, places=exe_places)
utils.train_with_pyreader(
exe,
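Two hunks above round the train/valid batch size down to a multiple of `bs_denominator` (the device count) after capping it at a tenth of the sample count, then clamp it from below. A one-function sketch of that adjustment:

```python
def fit_batch_size(requested, num_samples, bs_denominator):
    """Cap at num_samples/10, round down to a multiple of the device
    count, and never go below one sample per device."""
    bs = min(int(num_samples / 10), requested)
    bs = int(bs / bs_denominator) * bs_denominator
    return max(bs, bs_denominator)

print(fit_batch_size(128, 1000, 8))  # -> 96 (100 rounded down to 8*12)
```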
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/utils.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/utils.py
old mode 100644
new mode 100755
index 0d1e97dbe..360b1d66f
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_lstm/scenario_lib/utils.py
@@ -127,14 +127,15 @@ def train_with_pyreader(exe, train_prog, compiled_train_prog, train_pyreader, \
pred_iou = np.array(train_outs[2])
label = np.array(train_outs[-2])
iou = np.array(train_outs[-1])
- train_metrics.accumulate(loss, pred_label, label, pred_iou, iou)
+ train_metrics.accumulate(loss, pred_label, label, pred_iou,
+ iou)
if log_interval > 0 and (train_iter % log_interval == 0):
train_metrics.finalize_and_log_out( \
info='[TRAIN] Epoch {}, iter {} average: '.format(epoch, train_iter))
except Exception as e:
logger.info(
- "[TRAIN] Epoch {}, iter {} data training failed: {}".format(
- epoch, train_iter, str(e)))
+ "[TRAIN] Epoch {}, iter {} data training failed: {}".
+ format(epoch, train_iter, str(e)))
train_iter += 1
if len(epoch_periods) < 1:
@@ -142,9 +143,8 @@ def train_with_pyreader(exe, train_prog, compiled_train_prog, train_pyreader, \
'No iteration was executed, please check the data reader')
sys.exit(1)
- logger.info(
- '[TRAIN] Epoch {} training finished, average time: {}'.format(
- epoch, np.mean(epoch_periods)))
+ logger.info('[TRAIN] Epoch {} training finished, average time: {}'.
+ format(epoch, np.mean(epoch_periods)))
train_metrics.finalize_and_log_out( \
info='[TRAIN] Finished ... Epoch {} all iters average: '.format(epoch))
@@ -160,8 +160,8 @@ def train_with_pyreader(exe, train_prog, compiled_train_prog, train_pyreader, \
global best_test_acc1
global min_test_loss
if test_status and (test_acc1 > best_test_acc1 or
- (test_acc1 == best_test_acc1
- and test_loss < min_test_loss)):
+ (test_acc1 == best_test_acc1 and
+ test_loss < min_test_loss)):
best_test_acc1 = test_acc1
min_test_loss = test_loss
save_postfix = "_epoch{}_acc{}".format(epoch, best_test_acc1)
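The final hunk keeps a checkpoint when validation top-1 improves, or ties with a lower loss. A tiny sketch of that tie-breaking rule:

```python
def is_better(acc1, loss, best_acc1, min_loss):
    """New checkpoint wins on higher acc1, or equal acc1 with lower loss."""
    return acc1 > best_acc1 or (acc1 == best_acc1 and loss < min_loss)

assert is_better(0.80, 0.5, 0.79, 0.4)      # accuracy improved
assert is_better(0.80, 0.3, 0.80, 0.4)      # tie on accuracy, loss dropped
assert not is_better(0.80, 0.5, 0.80, 0.4)  # no improvement either way
```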
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_proposal/configs/bmn_football_v2.0.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_proposal/configs/bmn_football_v2.0.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_proposal/configs/pptsm_football_v2.0.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_proposal/configs/pptsm_football_v2.0.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_proposal/configs/tsm_football.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_proposal/configs/tsm_football.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_proposal/configs/tsn_football.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/FootballAction/train_proposal/configs/tsn_football.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/README.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/applications/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/detection/ava/ava.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/detection/ava/ava.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/detection/ava/ava_part.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/detection/ava/ava_part.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/estimation/adds/adds.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/estimation/adds/adds.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/example.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/example.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/localization/bmn.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/localization/bmn.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/localization/bsn.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/localization/bsn.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/localization/ctcn.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/localization/ctcn.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/multimodal/actbert/actbert.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/multimodal/actbert/actbert.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/multimodal/actbert/actbert_msrvtt.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/multimodal/actbert/actbert_msrvtt.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/partitioners/transnetv2/transnetv2.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/partitioners/transnetv2/transnetv2.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/agcn/agcn_fsd.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/agcn/agcn_fsd.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/agcn/agcn_ntucs.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/agcn/agcn_ntucs.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/attention_lstm/attention_lstm_youtube8m.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/attention_lstm/attention_lstm_youtube8m.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/non_local/non_local.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/non_local/non_local.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptimesformer/pptimesformer_k400_videos.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptimesformer/pptimesformer_k400_videos.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsm/pptsm_k400_frames_dense.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsm/pptsm_k400_frames_dense.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsm/pptsm_k400_frames_dense_r101.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsm/pptsm_k400_frames_dense_r101.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsm/pptsm_k400_frames_uniform.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsm/pptsm_k400_frames_uniform.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsm/pptsm_k400_videos_uniform.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsm/pptsm_k400_videos_uniform.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsn/pptsn_k400_frames.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsn/pptsn_k400_frames.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsn/pptsn_k400_videos.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/pptsn/pptsn_k400_videos.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/slowfast/slowfast.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/slowfast/slowfast.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/slowfast/slowfast_multigrid.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/slowfast/slowfast_multigrid.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/stgcn/stgcn_fsd.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/stgcn/stgcn_fsd.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/stgcn/stgcn_ntucs.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/stgcn/stgcn_ntucs.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/timesformer/timesformer_k400_videos.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/timesformer/timesformer_k400_videos.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_k400_frames.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_k400_frames.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_k400_frames_nhwc.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_k400_frames_nhwc.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_k400_videos.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_k400_videos.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_ucf101_frames.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_ucf101_frames.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_ucf101_frames_nhwc.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_ucf101_frames_nhwc.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_ucf101_videos.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsm/tsm_ucf101_videos.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsn/tsn_dali.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsn/tsn_dali.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsn/tsn_k400_frames.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsn/tsn_k400_frames.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsn/tsn_k400_videos.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/tsn/tsn_k400_videos.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/videoswin/videoswin_k400_videos.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/recognition/videoswin/videoswin_k400_videos.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/segmentation/cfbip_davis.yaml b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/configs/segmentation/cfbip_davis.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/CMakeLists.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/CMakeLists.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/example_video_dir/example01.avi b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/example_video_dir/example01.avi
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/external-cmake/auto-log.cmake b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/external-cmake/auto-log.cmake
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/imgs/PPTSM_pred_result.png b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/imgs/PPTSM_pred_result.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/include/postprocess_op.h b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/include/postprocess_op.h
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/include/preprocess_op.h b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/include/preprocess_op.h
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/include/utility.h b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/include/utility.h
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/include/video_rec.h b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/include/video_rec.h
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/readme.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/readme.md
old mode 100644
new mode 100755
index af8a4b7b3..13ccb46ad
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/readme.md
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/readme.md
@@ -165,9 +165,9 @@ PaddleVideo model deployment.
```
inference/
└── ppTSM/
- ├── ppTSM.pdiparams
- ├── ppTSM.pdiparamsinfo
- └── ppTSM.pdmodel
+ ├── ppTSM.pdiparams
+ ├── ppTSM.pdiparamsinfo
+ └── ppTSM.pdmodel
```
@@ -264,7 +264,7 @@ PaddleVideo model deployment.
| rec_batch_num | int | 1 | Batch size during model prediction |
| char_list_file | str | "../../data/k400/Kinetics-400_label_list.txt" | Path of the text file that stores all category ids and their corresponding names |
- Taking the sample video `example01.avi` under example_video_dir as the input video, the final detection results printed on the screen are as follows.
+ Taking the sample video `example01.avi` under example_video_dir as the input video, the final detection results printed on the screen are as follows.
```bash
[./inference/ppTSM]
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/readme_en.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/readme_en.md
old mode 100644
new mode 100755
index dbd510f88..9f22d9f11
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/readme_en.md
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/readme_en.md
@@ -263,7 +263,7 @@ More parameters are as follows:
| rec_batch_num | int | 1 | Batch size during model prediction |
| char_list_file | str | "../../data/k400/Kinetics-400_label_list.txt" | The text path for storing all category labels and corresponding names |
- Taking the sample video `example01.avi` under example_video_dir as the input video, the final detection results printed on the screen are as follows.
+ Taking the sample video `example01.avi` under example_video_dir as the input video, the final detection results printed on the screen are as follows.
```bash
[./inference/ppTSM]
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/src/main.cpp b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/src/main.cpp
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/src/postprocess_op.cpp b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/src/postprocess_op.cpp
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/src/preprocess_op.cpp b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/src/preprocess_op.cpp
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/src/utility.cpp b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/src/utility.cpp
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/src/video_rec.cpp b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/src/video_rec.cpp
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/tools/build.sh b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/deploy/cpp_infer/tools/build.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/main.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/main.py
old mode 100644
new mode 100755
index 0f23d0b0e..fa1c58f2d
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/main.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/main.py
@@ -24,35 +24,36 @@
def parse_args():
parser = argparse.ArgumentParser("PaddleVideo train script")
- parser.add_argument('-c',
- '--config',
- type=str,
- default='configs/example.yaml',
- help='config file path')
- parser.add_argument('-o',
- '--override',
- action='append',
- default=[],
- help='config options to be overridden')
- parser.add_argument('--test',
- action='store_true',
- help='whether to test a model')
- parser.add_argument('--train_dali',
- action='store_true',
- help='whether to use dali to speed up training')
- parser.add_argument('--multigrid',
- action='store_true',
- help='whether to use multigrid training')
- parser.add_argument('-w',
- '--weights',
- type=str,
- help='weights for finetuning or testing')
- parser.add_argument('--fleet',
- action='store_true',
- help='whether to use fleet run distributed training')
- parser.add_argument('--amp',
- action='store_true',
- help='whether to open amp training.')
+ parser.add_argument(
+ '-c',
+ '--config',
+ type=str,
+ default='configs/example.yaml',
+ help='config file path')
+ parser.add_argument(
+ '-o',
+ '--override',
+ action='append',
+ default=[],
+ help='config options to be overridden')
+ parser.add_argument(
+ '--test', action='store_true', help='whether to test a model')
+ parser.add_argument(
+ '--train_dali',
+ action='store_true',
+ help='whether to use dali to speed up training')
+ parser.add_argument(
+ '--multigrid',
+ action='store_true',
+ help='whether to use multigrid training')
+ parser.add_argument(
+ '-w', '--weights', type=str, help='weights for finetuning or testing')
+ parser.add_argument(
+ '--fleet',
+ action='store_true',
+ help='whether to use fleet run distributed training')
+ parser.add_argument(
+ '--amp', action='store_true', help='whether to open amp training.')
parser.add_argument(
'--validate',
action='store_true',
@@ -74,10 +75,8 @@ def parse_args():
default=None,
help='The option of profiler, which should be in format '
'\"key1=value1;key2=value2;key3=value3\".')
- parser.add_argument('--use_npu',
- type=bool,
- default=False,
- help='whether use npu.')
+ parser.add_argument(
+ '--use_npu', type=bool, default=False, help='whether use npu.')
args = parser.parse_args()
return args
@@ -106,18 +105,18 @@ def main():
elif args.train_dali:
train_dali(cfg, weights=args.weights, parallel=parallel)
elif args.multigrid:
- train_model_multigrid(cfg,
- world_size=world_size,
- validate=args.validate)
+ train_model_multigrid(
+ cfg, world_size=world_size, validate=args.validate)
else:
- train_model(cfg,
- weights=args.weights,
- parallel=parallel,
- validate=args.validate,
- use_fleet=args.fleet,
- amp=args.amp,
- max_iters=args.max_iters,
- profiler_options=args.profiler_options)
+ train_model(
+ cfg,
+ weights=args.weights,
+ parallel=parallel,
+ validate=args.validate,
+ use_fleet=args.fleet,
+ amp=args.amp,
+ max_iters=args.max_iters,
+ profiler_options=args.profiler_options)
if __name__ == '__main__':
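
For context, the reformatted `parse_args()` above keeps `--use_npu` declared with `type=bool`, which argparse does not treat as a real boolean switch: it applies `bool()` to the raw string, so any non-empty value parses as `True`. A minimal sketch (a trimmed-down parser reproducing just two flags from the hunk, not PaddleVideo's actual entry point) illustrates the difference from `action='store_true'`:

```python
# Hedged sketch: only two flags from main.py's parse_args() are reproduced here.
import argparse

parser = argparse.ArgumentParser("PaddleVideo train script")
parser.add_argument(
    '--amp', action='store_true', help='whether to open amp training.')
parser.add_argument(
    '--use_npu', type=bool, default=False, help='whether use npu.')

print(parser.parse_args([]).use_npu)                      # False (default)
print(parser.parse_args(['--use_npu', 'False']).use_npu)  # True: bool('False') is truthy
print(parser.parse_args(['--amp']).amp)                   # True: store_true works as expected
```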
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/builder.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/builder.py
old mode 100644
new mode 100755
index 23a65c3bf..527311100
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/builder.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/builder.py
@@ -76,15 +76,14 @@ def build_dataloader(dataset,
shuffle(bool): whether to shuffle the data at every epoch.
"""
if multigrid:
- sampler = DistributedShortSampler(dataset,
- batch_sizes=batch_size,
- shuffle=True,
- drop_last=True)
+ sampler = DistributedShortSampler(
+ dataset, batch_sizes=batch_size, shuffle=True, drop_last=True)
else:
- sampler = DistributedBatchSampler(dataset,
- batch_size=batch_size,
- shuffle=shuffle,
- drop_last=drop_last)
+ sampler = DistributedBatchSampler(
+ dataset,
+ batch_size=batch_size,
+ shuffle=shuffle,
+ drop_last=drop_last)
#NOTE(shipping): when switching the mix operator on, such as mixup or cutmix,
# a batch like: [[img, label, attribute, ...], [imgs, label, attribute, ...], ...] will recollate to:
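
For reference, a minimal, hedged sketch of the non-multigrid branch above: `paddle.io.DistributedBatchSampler` driving a `DataLoader` (assumes Paddle 2.x single-card defaults; `ToyDataset` is an illustrative stand-in, not part of PaddleVideo):

```python
# Hedged sketch of the sampler selection shown in the build_dataloader hunk.
import numpy as np
from paddle.io import DataLoader, Dataset, DistributedBatchSampler

class ToyDataset(Dataset):
    def __getitem__(self, idx):
        return np.array([idx], dtype='float32'), np.array([idx % 2])

    def __len__(self):
        return 16

dataset = ToyDataset()
sampler = DistributedBatchSampler(
    dataset, batch_size=4, shuffle=True, drop_last=True)  # mirrors the else-branch
loader = DataLoader(dataset, batch_sampler=sampler)
for imgs, labels in loader:
    print(imgs.shape, labels.numpy().ravel())  # [4, 1] per batch
```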
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dali_loader.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dali_loader.py
old mode 100644
new mode 100755
index 73fe64fcc..79bc9c0f0
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dali_loader.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dali_loader.py
@@ -57,6 +57,7 @@ def build_dali_reader(self):
"""
build dali training reader
"""
+
def reader_():
with open(self.file_path) as flist:
full_lines = [line for line in flist]
@@ -99,28 +100,30 @@ def reader_():
device_id = ParallelEnv().local_rank
logger.info(f'---------- device_id: {device_id} -----------')
- pipe = VideoPipe(batch_size=self.batch_size,
- num_threads=1,
- device_id=device_id,
- file_list=video_files,
- sequence_length=self.num_seg * self.seglen,
- num_seg=self.num_seg,
- seg_length=self.seglen,
- resize_shorter_scale=self.short_size,
- crop_target_size=self.target_size,
- is_training=True,
- num_shards=self.num_shards,
- shard_id=self.shard_id,
- dali_mean=self.dali_mean,
- dali_std=self.dali_std)
+ pipe = VideoPipe(
+ batch_size=self.batch_size,
+ num_threads=1,
+ device_id=device_id,
+ file_list=video_files,
+ sequence_length=self.num_seg * self.seglen,
+ num_seg=self.num_seg,
+ seg_length=self.seglen,
+ resize_shorter_scale=self.short_size,
+ crop_target_size=self.target_size,
+ is_training=True,
+ num_shards=self.num_shards,
+ shard_id=self.shard_id,
+ dali_mean=self.dali_mean,
+ dali_std=self.dali_std)
logger.info(
'initializing dataset, this may take several minutes if the dataset is large ... '
)
- video_loader = DALIGenericIterator([pipe], ['image', 'label'],
- len(lines),
- dynamic_shape=True,
- auto_reset=True)
+ video_loader = DALIGenericIterator(
+ [pipe], ['image', 'label'],
+ len(lines),
+ dynamic_shape=True,
+ auto_reset=True)
return video_loader
@@ -146,26 +149,26 @@ def __init__(self,
dali_mean=0.,
dali_std=1.0):
super(VideoPipe, self).__init__(batch_size, num_threads, device_id)
- self.input = ops.VideoReader(device="gpu",
- file_list=file_list,
- sequence_length=sequence_length,
- num_seg=num_seg,
- seg_length=seg_length,
- is_training=is_training,
- num_shards=num_shards,
- shard_id=shard_id,
- random_shuffle=is_training,
- initial_fill=initial_prefetch_size)
+ self.input = ops.VideoReader(
+ device="gpu",
+ file_list=file_list,
+ sequence_length=sequence_length,
+ num_seg=num_seg,
+ seg_length=seg_length,
+ is_training=is_training,
+ num_shards=num_shards,
+ shard_id=shard_id,
+ random_shuffle=is_training,
+ initial_fill=initial_prefetch_size)
# the sequence data read by ops.VideoReader is of shape [F, H, W, C]
# Because the ops.Resize does not support sequence data,
# it will be transposed into [H, W, F, C],
# then reshaped to [H, W, FC], and then resized like a 2-D image.
self.transpose = ops.Transpose(device="gpu", perm=[1, 2, 0, 3])
- self.reshape = ops.Reshape(device="gpu",
- rel_shape=[1.0, 1.0, -1],
- layout='HWC')
- self.resize = ops.Resize(device="gpu",
- resize_shorter=resize_shorter_scale)
+ self.reshape = ops.Reshape(
+ device="gpu", rel_shape=[1.0, 1.0, -1], layout='HWC')
+ self.resize = ops.Resize(
+ device="gpu", resize_shorter=resize_shorter_scale)
# crops and mirror are applied by ops.CropMirrorNormalize.
# Normalization will be implemented in Paddle due to the difficulty of dimension broadcast;
# it is unclear whether dimension broadcast can be implemented correctly by DALI, so the Paddle op is used instead.
@@ -180,9 +183,12 @@ def __init__(self,
std=dali_std)
self.reshape_back = ops.Reshape(
device="gpu",
- shape=[num_seg, seg_length * 3, crop_target_size, crop_target_size],
+ shape=[
+ num_seg, seg_length * 3, crop_target_size, crop_target_size
+ ],
layout='FCHW')
- self.cast_label = ops.Cast(device="gpu", dtype=types.DALIDataType.INT64)
+ self.cast_label = ops.Cast(
+ device="gpu", dtype=types.DALIDataType.INT64)
def define_graph(self):
output, label = self.input(name="Reader")
@@ -196,10 +202,8 @@ def define_graph(self):
mirror_flag = self.mirror_generator()
mirror_flag = (mirror_flag > 0.5)
mirror_flag = self.cast_mirror(mirror_flag)
- output = self.crop_mirror_norm(output,
- crop_pos_x=pos_x,
- crop_pos_y=pos_y,
- mirror=mirror_flag)
+ output = self.crop_mirror_norm(
+ output, crop_pos_x=pos_x, crop_pos_y=pos_y, mirror=mirror_flag)
output = self.reshape_back(output)
label = self.cast_label(label)
return output, label
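
The comments in the hunk above explain why the pipeline transposes and reshapes the frame sequence before `ops.Resize`. A small NumPy sketch (illustrative only, no DALI dependency) shows the same layout trick on dummy data:

```python
# Sketch of the layout trick: [F, H, W, C] -> [H, W, F, C] -> [H, W, F*C],
# so the sequence can be resized like an ordinary 2-D image.
import numpy as np

F, H, W, C = 8, 4, 6, 3  # frames, height, width, channels
seq = np.arange(F * H * W * C, dtype=np.float32).reshape(F, H, W, C)

hwfc = seq.transpose(1, 2, 0, 3)   # [H, W, F, C]
flat = hwfc.reshape(H, W, F * C)   # [H, W, F*C], resizable as an image
print(flat.shape)                  # (4, 6, 24)
```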
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/MRI.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/MRI.py
old mode 100644
new mode 100755
index 990cb87bd..9b2e6d56e
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/MRI.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/MRI.py
@@ -46,6 +46,7 @@ class MRIDataset(BaseDataset):
suffix (str): suffix of file. Default: 'img_{:05}.jpg'.
"""
+
def __init__(self,
file_path,
pipeline,
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/MRI_SlowFast.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/MRI_SlowFast.py
old mode 100644
new mode 100755
index db905e4e4..f4356c54d
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/MRI_SlowFast.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/MRI_SlowFast.py
@@ -46,6 +46,7 @@ class SFMRIDataset(BaseDataset):
suffix (str): suffix of file. Default: 'img_{:05}.jpg'.
"""
+
def __init__(self,
file_path,
pipeline,
@@ -89,8 +90,8 @@ def prepare_train(self, idx):
format(results['frame_dir'], ir))
idx = random.randint(0, len(self.info) - 1)
continue
- return np.array(results['imgs'][0]), np.array(
- results['imgs'][1]), np.array([results['labels']])
+ return np.array(results['imgs'][0]), np.array(results['imgs'][
+ 1]), np.array([results['labels']])
def prepare_test(self, idx):
"""Prepare the frames for test given index. """
@@ -107,5 +108,5 @@ def prepare_test(self, idx):
format(results['frame_dir'], ir))
idx = random.randint(0, len(self.info) - 1)
continue
- return np.array(results['imgs'][0]), np.array(
- results['imgs'][1]), np.array([results['labels']])
+ return np.array(results['imgs'][0]), np.array(results['imgs'][
+ 1]), np.array([results['labels']])
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/actbert_dataset.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/actbert_dataset.py
old mode 100644
new mode 100755
index 5c50adb62..a2c3289e5
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/actbert_dataset.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/actbert_dataset.py
@@ -32,22 +32,22 @@
class ActBertDataset(BaseDataset):
"""ActBert dataset.
"""
+
def __init__(
- self,
- file_path,
- pipeline,
- bert_model="bert-base-uncased",
- data_prefix=None,
- test_mode=False,
- ):
+ self,
+ file_path,
+ pipeline,
+ bert_model="bert-base-uncased",
+ data_prefix=None,
+ test_mode=False, ):
self.bert_model = bert_model
super().__init__(file_path, pipeline, data_prefix, test_mode)
def load_file(self):
"""Load index file to get video information."""
feature_data = np.load(self.file_path, allow_pickle=True)
- self.tokenizer = BertTokenizer.from_pretrained(self.bert_model,
- do_lower_case=True)
+ self.tokenizer = BertTokenizer.from_pretrained(
+ self.bert_model, do_lower_case=True)
self.info = []
for item in feature_data:
self.info.append(dict(feature=item, tokenizer=self.tokenizer))
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/ava_dataset.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/ava_dataset.py
old mode 100644
new mode 100755
index 744e15bb6..e82cfa9d4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/ava_dataset.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/ava_dataset.py
@@ -66,8 +66,7 @@ def __init__(self,
file_path,
pipeline,
data_prefix,
- test_mode,
- )
+ test_mode, )
if self.proposal_file is not None:
self.proposals = self._load(self.proposal_file)
else:
@@ -89,18 +88,19 @@ def parse_img_record(self, img_records):
num_img_records = len(img_records)
selected_records = list(
filter(
- lambda x: np.array_equal(x['entity_box'], img_record[
- 'entity_box']), img_records))
+ lambda x: np.array_equal(x['entity_box'], img_record['entity_box']),
+ img_records))
num_selected_records = len(selected_records)
img_records = list(
filter(
- lambda x: not np.array_equal(x['entity_box'], img_record[
- 'entity_box']), img_records))
+ lambda x: not np.array_equal(x['entity_box'], img_record['entity_box']),
+ img_records))
assert len(img_records) + num_selected_records == num_img_records
bboxes.append(img_record['entity_box'])
valid_labels = np.array([
- selected_record['label'] for selected_record in selected_records
+ selected_record['label']
+ for selected_record in selected_records
])
label = np.zeros(self.num_classes, dtype=np.float32)
@@ -125,8 +125,8 @@ def filter_exclude_file(self):
for i, video_info in enumerate(self.info):
valid_indexes.append(i)
for video_id, timestamp in exclude_video_infos:
- if (video_info['video_id'] == video_id
- and video_info['timestamp'] == int(timestamp)):
+ if (video_info['video_id'] == video_id and
+ video_info['timestamp'] == int(timestamp)):
valid_indexes.pop()
break
return valid_indexes
@@ -149,31 +149,32 @@ def load_file(self):
shot_info = (0, (self.timestamp_end - self.timestamp_start) *
self._FPS)
- video_info = dict(video_id=video_id,
- timestamp=timestamp,
- entity_box=entity_box,
- label=label,
- entity_id=entity_id,
- shot_info=shot_info)
+ video_info = dict(
+ video_id=video_id,
+ timestamp=timestamp,
+ entity_box=entity_box,
+ label=label,
+ entity_id=entity_id,
+ shot_info=shot_info)
records_dict_by_img[img_key].append(video_info)
for img_key in records_dict_by_img:
video_id, timestamp = img_key.split(',')
bboxes, labels, entity_ids = self.parse_img_record(
records_dict_by_img[img_key])
- ann = dict(gt_bboxes=bboxes,
- gt_labels=labels,
- entity_ids=entity_ids)
+ ann = dict(
+ gt_bboxes=bboxes, gt_labels=labels, entity_ids=entity_ids)
frame_dir = video_id
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
- video_info = dict(frame_dir=frame_dir,
- video_id=video_id,
- timestamp=int(timestamp),
- img_key=img_key,
- shot_info=shot_info,
- fps=self._FPS,
- ann=ann)
+ video_info = dict(
+ frame_dir=frame_dir,
+ video_id=video_id,
+ timestamp=int(timestamp),
+ img_key=img_key,
+ shot_info=shot_info,
+ fps=self._FPS,
+ ann=ann)
info.append(video_info)
return info
@@ -230,8 +231,8 @@ def prepare_train(self, idx):
), idx, len_proposals, len_gt_bboxes, len_gt_labels, len_scores, len_entity_ids
def my_padding_2d(self, feat, max_len):
- feat_add = np.zeros((max_len - feat.shape[0], feat.shape[1]),
- dtype=np.float32)
+ feat_add = np.zeros(
+ (max_len - feat.shape[0], feat.shape[1]), dtype=np.float32)
feat_pad = np.concatenate((feat, feat_add), axis=0)
return feat_pad
@@ -244,6 +245,7 @@ def prepare_test(self, idx):
return self.prepare_train(idx)
def evaluate(self, results):
- return ava_evaluate_results(self.info, len(self), results,
- self.custom_classes, self.label_file,
- self.file_path, self.exclude_file)
+ return ava_evaluate_results(self.info,
+ len(self), results, self.custom_classes,
+ self.label_file, self.file_path,
+ self.exclude_file)
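
The two `filter()` calls reformatted in `parse_img_record` above partition the records by whether their `entity_box` matches the current record's box. A hedged stand-alone sketch of that pattern (toy records, not the full AVA schema):

```python
# Split records into those sharing the first record's entity_box and the rest,
# mirroring the selected_records / img_records filters in the hunk above.
import numpy as np

records = [
    {'entity_box': np.array([0.1, 0.2, 0.5, 0.6]), 'label': 3},
    {'entity_box': np.array([0.1, 0.2, 0.5, 0.6]), 'label': 7},
    {'entity_box': np.array([0.0, 0.0, 1.0, 1.0]), 'label': 1},
]
box = records[0]['entity_box']
selected = [r for r in records if np.array_equal(r['entity_box'], box)]
rest = [r for r in records if not np.array_equal(r['entity_box'], box)]
assert len(selected) + len(rest) == len(records)
print([r['label'] for r in selected])  # [3, 7]
```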
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/base.py
old mode 100644
new mode 100755
index 2549dc411..0b4035953
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/base.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/base.py
@@ -38,6 +38,7 @@ class BaseDataset(Dataset, ABC):
test_mode (bool): whether to build test dataset. Default: False.
"""
+
def __init__(self, file_path, pipeline, data_prefix=None, test_mode=False):
super().__init__()
self.file_path = file_path
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/bmn_dataset.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/bmn_dataset.py
old mode 100644
new mode 100755
index 44c765191..1c7c69096
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/bmn_dataset.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/bmn_dataset.py
@@ -25,13 +25,13 @@
class BMNDataset(BaseDataset):
"""Video dataset for action localization.
"""
+
def __init__(
- self,
- file_path,
- pipeline,
- subset,
- **kwargs,
- ):
+ self,
+ file_path,
+ pipeline,
+ subset,
+ **kwargs, ):
self.subset = subset
super().__init__(file_path, pipeline, **kwargs)
@@ -45,16 +45,15 @@ def load_file(self):
info.append(
dict(
video_name=video_name,
- video_info=annos[video_name],
- ))
+ video_info=annos[video_name], ))
#sort by video_name
sort_f = lambda elem: elem['video_name']
info.sort(key=sort_f)
#add video_idx to info
for idx, elem in enumerate(info):
info[idx]['video_idx'] = idx
- logger.info("{} subset video numbers: {}".format(
- self.subset, len(info)))
+ logger.info("{} subset video numbers: {}".format(self.subset,
+ len(info)))
return info
def prepare_train(self, idx):
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/davis_dataset.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/davis_dataset.py
old mode 100644
new mode 100755
index 20a275971..552fefb9c
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/davis_dataset.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/davis_dataset.py
@@ -32,6 +32,7 @@
class VOS_Test(Dataset):
"""process frames in each video
"""
+
def __init__(self,
image_root,
label_root,
@@ -59,8 +60,8 @@ def __init__(self,
current_label_name = img_name.split('.')[0] + '.png'
if current_label_name in self.labels:
current_label = self.read_label(current_label_name)
- if temp_obj_num < np.unique(
- current_label)[-1]: #get object number from label_id
+ if temp_obj_num < np.unique(current_label)[
+ -1]: #get object number from label_id
temp_obj_num = np.unique(current_label)[-1]
def __len__(self):
@@ -127,17 +128,17 @@ def __getitem__(self, idx):
class DavisDataset(BaseDataset):
"""Davis 2017 dataset.
"""
+
def __init__(
- self,
- file_path,
- result_root,
- pipeline,
- data_prefix=None,
- test_mode=False,
- year=2017,
- rgb=False,
- resolution='480p',
- ):
+ self,
+ file_path,
+ result_root,
+ pipeline,
+ data_prefix=None,
+ test_mode=False,
+ year=2017,
+ rgb=False,
+ resolution='480p', ):
self.rgb = rgb
self.result_root = result_root
self.resolution = resolution
@@ -152,8 +153,8 @@ def load_file(self):
self.resolution)
seq_names = []
with open(
- os.path.join(self.file_path, 'ImageSets', str(self.year),
- self.spt + '.txt')) as f:
+ os.path.join(self.file_path, 'ImageSets',
+ str(self.year), self.spt + '.txt')) as f:
seqs_tmp = f.readlines()
seqs_tmp = list(map(lambda elem: elem.strip(), seqs_tmp))
seq_names.extend(seqs_tmp)
@@ -178,12 +179,13 @@ def prepare_test(self, idx):
shutil.copy(source_label_path, result_label_path)
- seq_dataset = VOS_Test(self.image_root,
- self.label_root,
- seq_name,
- images,
- labels,
- self.pipeline,
- rgb=self.rgb,
- resolution=480)
+ seq_dataset = VOS_Test(
+ self.image_root,
+ self.label_root,
+ seq_name,
+ images,
+ labels,
+ self.pipeline,
+ rgb=self.rgb,
+ resolution=480)
return seq_dataset
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/feature.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/feature.py
old mode 100644
new mode 100755
index 7bf4cd604..87d821c56
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/feature.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/feature.py
@@ -24,14 +24,14 @@ class FeatureDataset(BaseDataset):
Example:(TODO)
Args:(TODO)
"""
+
def __init__(
- self,
- file_path,
- pipeline,
- data_prefix=None,
- test_mode=False,
- suffix=None,
- ):
+ self,
+ file_path,
+ pipeline,
+ data_prefix=None,
+ test_mode=False,
+ suffix=None, ):
self.suffix = suffix
super().__init__(file_path, pipeline, data_prefix, test_mode)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/frame.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/frame.py
old mode 100644
new mode 100755
index b02f52659..caaef839c
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/frame.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/frame.py
@@ -46,6 +46,7 @@ class FrameDataset(BaseDataset):
suffix (str): suffix of file. Default: 'img_{:05}.jpg'.
"""
+
def __init__(self,
file_path,
pipeline,
@@ -67,10 +68,11 @@ def load_file(self):
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
info.append(
- dict(frame_dir=frame_dir,
- suffix=self.suffix,
- frames_len=frames_len,
- labels=int(labels)))
+ dict(
+ frame_dir=frame_dir,
+ suffix=self.suffix,
+ frames_len=frames_len,
+ labels=int(labels)))
return info
def prepare_train(self, idx):
@@ -125,7 +127,9 @@ class FrameDataset_Sport(BaseDataset):
pipeline(XXX): A sequence of data transforms.
**kwargs: Keyword arguments for ```BaseDataset```.
"""
- def __init__(self, file_path, pipeline, num_retries=5, suffix='', **kwargs):
+
+ def __init__(self, file_path, pipeline, num_retries=5, suffix='',
+ **kwargs):
self.num_retries = num_retries
self.suffix = suffix
super().__init__(file_path, pipeline, **kwargs)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/msrvtt.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/msrvtt.py
old mode 100644
new mode 100755
index e90718c00..9580b454f
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/msrvtt.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/msrvtt.py
@@ -31,22 +31,22 @@
class MSRVTTDataset(BaseDataset):
"""MSR-VTT dataset for text-video clip retrieval.
"""
+
def __init__(
- self,
- file_path,
- pipeline,
- features_path,
- bert_model="bert-base-uncased",
- padding_index=0,
- max_seq_length=36,
- max_region_num=36,
- max_action_num=5,
- vision_feature_dim=2048,
- action_feature_dim=2048,
- spatials_dim=5,
- data_prefix=None,
- test_mode=False,
- ):
+ self,
+ file_path,
+ pipeline,
+ features_path,
+ bert_model="bert-base-uncased",
+ padding_index=0,
+ max_seq_length=36,
+ max_region_num=36,
+ max_action_num=5,
+ vision_feature_dim=2048,
+ action_feature_dim=2048,
+ spatials_dim=5,
+ data_prefix=None,
+ test_mode=False, ):
self.features_path = features_path
self.bert_model = bert_model
self.padding_index = padding_index
@@ -56,8 +56,8 @@ def __init__(
self.vision_feature_dim = vision_feature_dim
self.action_feature_dim = action_feature_dim
self.spatials_dim = spatials_dim
- self._tokenizer = BertTokenizer.from_pretrained(bert_model,
- do_lower_case=True)
+ self._tokenizer = BertTokenizer.from_pretrained(
+ bert_model, do_lower_case=True)
super().__init__(file_path, pipeline, data_prefix, test_mode)
self.tokenize()
self.gen_feature()
@@ -115,18 +115,19 @@ def get_image_feature(self, video_id):
g_feat = np.sum(features, axis=0) / num_boxes
num_boxes = num_boxes + 1
features = np.concatenate(
- [np.expand_dims(g_feat, axis=0), features], axis=0)
+ [np.expand_dims(
+ g_feat, axis=0), features], axis=0)
action_features = item["action_features"].reshape(
-1, self.action_feature_dim)
- image_location = np.zeros((boxes.shape[0], self.spatials_dim),
- dtype=np.float32)
+ image_location = np.zeros(
+ (boxes.shape[0], self.spatials_dim), dtype=np.float32)
image_location[:, :4] = boxes
- image_location[:,
- 4] = ((image_location[:, 3] - image_location[:, 1]) *
- (image_location[:, 2] - image_location[:, 0]) /
- (float(image_w) * float(image_h)))
+ image_location[:, 4] = (
+ (image_location[:, 3] - image_location[:, 1]) *
+ (image_location[:, 2] - image_location[:, 0]) /
+ (float(image_w) * float(image_h)))
image_location[:, 0] = image_location[:, 0] / float(image_w)
image_location[:, 1] = image_location[:, 1] / float(image_h)
@@ -135,7 +136,8 @@ def get_image_feature(self, video_id):
g_location = np.array([0, 0, 1, 1, 1])
image_location = np.concatenate(
- [np.expand_dims(g_location, axis=0), image_location], axis=0)
+ [np.expand_dims(
+ g_location, axis=0), image_location], axis=0)
return features, num_boxes, image_location, action_features
def gen_feature(self):
@@ -204,8 +206,7 @@ def prepare_test(self, idx):
input_mask,
self.image_mask_all,
self.action_mask_all,
- target_all,
- )
+ target_all, )
def __len__(self):
return len(self.caption_entries)
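
The `image_location` code reformatted above builds a 5-dimensional spatial feature per box: raw `[x1, y1, x2, y2]` plus the box's area as a fraction of the image, with coordinates then normalized by image width and height. A NumPy sketch of that computation (assumption: the hunk is truncated, and columns 2 and 3 are normalized the same way as columns 0 and 1):

```python
# Hedged sketch of the 5-D spatial feature from get_image_feature above.
import numpy as np

boxes = np.array([[10., 20., 110., 220.]], dtype=np.float32)  # x1, y1, x2, y2
image_w, image_h = 640.0, 480.0

loc = np.zeros((boxes.shape[0], 5), dtype=np.float32)
loc[:, :4] = boxes
loc[:, 4] = ((loc[:, 3] - loc[:, 1]) * (loc[:, 2] - loc[:, 0])
             / (image_w * image_h))  # box area as a fraction of the image
loc[:, 0] /= image_w
loc[:, 1] /= image_h
loc[:, 2] /= image_w  # assumed, by symmetry with column 0
loc[:, 3] /= image_h  # assumed, by symmetry with column 1
print(loc)  # [[0.015625 0.0416667 0.171875 0.4583333 0.0651042]]
```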
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/oxford.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/oxford.py
old mode 100644
new mode 100755
index a9e65c698..6d45a26c3
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/oxford.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/oxford.py
@@ -44,10 +44,11 @@ def load_file(self):
folder = osp.dirname(filename)
frame_index = line.strip().split('/')[1]
info.append(
- dict(data_path=self.data_prefix,
- filename=filename,
- folder=folder,
- frame_index=int(frame_index)))
+ dict(
+ data_path=self.data_prefix,
+ filename=filename,
+ folder=folder,
+ frame_index=int(frame_index)))
return info
def prepare_train(self, idx):
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/skeleton.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/skeleton.py
old mode 100644
new mode 100755
index 30a3f3e70..a46dcfb51
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/skeleton.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/skeleton.py
@@ -36,6 +36,7 @@ class SkeletonDataset(BaseDataset):
data_prefix (str): directory path of the data. Default: None.
test_mode (bool): Whether to build the test dataset. Default: False.
"""
+
def __init__(self, file_path, pipeline, label_path=None, test_mode=False):
self.label_path = label_path
super().__init__(file_path, pipeline, test_mode=test_mode)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/slowfast_video.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/slowfast_video.py
old mode 100644
new mode 100755
index 1adf89c54..c891c198c
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/slowfast_video.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/slowfast_video.py
@@ -23,6 +23,7 @@
logger = get_logger("paddlevideo")
+
@DATASETS.register()
class SFVideoDataset(BaseDataset):
"""Video dataset for action recognition
@@ -47,16 +48,16 @@ class SFVideoDataset(BaseDataset):
**kwargs: Keyword arguments for ```BaseDataset```.
"""
+
def __init__(
- self,
- file_path,
- pipeline,
- num_ensemble_views=1,
- num_spatial_crops=1,
- num_retries=5,
- num_samples_precise_bn=None,
- **kwargs,
- ):
+ self,
+ file_path,
+ pipeline,
+ num_ensemble_views=1,
+ num_spatial_crops=1,
+ num_retries=5,
+ num_samples_precise_bn=None,
+ **kwargs, ):
self.num_ensemble_views = num_ensemble_views
self.num_spatial_crops = num_spatial_crops
self.num_retries = num_retries
@@ -84,8 +85,7 @@ def load_file(self):
temporal_sample_index=tidx,
spatial_sample_index=sidx,
temporal_num_clips=self.num_ensemble_views,
- spatial_num_clips=self.num_spatial_crops,
- ))
+ spatial_num_clips=self.num_spatial_crops, ))
return info
def prepare_train(self, idx):
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/video.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/video.py
old mode 100644
new mode 100755
index f2d8f897a..ef7084c6c
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/video.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/dataset/video.py
@@ -41,7 +41,9 @@ class VideoDataset(BaseDataset):
pipeline(XXX): A sequence of data transforms.
**kwargs: Keyword arguments for ```BaseDataset```.
"""
- def __init__(self, file_path, pipeline, num_retries=5, suffix='', **kwargs):
+
+ def __init__(self, file_path, pipeline, num_retries=5, suffix='',
+ **kwargs):
self.num_retries = num_retries
self.suffix = suffix
super().__init__(file_path, pipeline, **kwargs)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/__init__.py
old mode 100644
new mode 100755
index 144c579cd..ca2072af4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/__init__.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/__init__.py
@@ -13,11 +13,10 @@
# limitations under the License.
from .anet_pipeline import GetMatchMap, GetVideoLabel, LoadFeat
-from .augmentations import (CenterCrop, ColorJitter, GroupRandomFlip,
- GroupResize, Image2Array, JitterScale, MultiCrop,
- Normalization, PackOutput, RandomCrop, RandomFlip,
- RandomResizedCrop, Scale, TenCrop, ToArray,
- UniformCrop)
+from .augmentations import (
+ CenterCrop, ColorJitter, GroupRandomFlip, GroupResize, Image2Array,
+ JitterScale, MultiCrop, Normalization, PackOutput, RandomCrop, RandomFlip,
+ RandomResizedCrop, Scale, TenCrop, ToArray, UniformCrop)
from .augmentations_ava import *
from .compose import Compose
from .decode import FeatureDecoder, FrameDecoder, VideoDecoder
@@ -37,8 +36,8 @@
'AutoPadding', 'Normalization', 'Mixup', 'Image2Array', 'Scale',
'GroupResize', 'VideoDecoder', 'FrameDecoder', 'PackOutput',
'GetVideoLabel', 'Cutmix', 'CenterCrop', 'RandomCrop', 'LoadFeat',
- 'RandomCap', 'JitterScale', 'Iden', 'VideoMix', 'ColorJitter', 'RandomFlip',
- 'ToArray', 'FeaturePadding', 'GetMatchMap', 'GroupRandomFlip', 'MultiCrop',
- 'SFMRI_DecodeSampler', 'MultiRestrictSize', 'MultiNorm',
- 'RandomResizedCrop', 'SamplerPkl'
+ 'RandomCap', 'JitterScale', 'Iden', 'VideoMix', 'ColorJitter',
+ 'RandomFlip', 'ToArray', 'FeaturePadding', 'GetMatchMap',
+ 'GroupRandomFlip', 'MultiCrop', 'SFMRI_DecodeSampler', 'MultiRestrictSize',
+ 'MultiNorm', 'RandomResizedCrop', 'SamplerPkl'
]
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/anet_pipeline.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/anet_pipeline.py
old mode 100644
new mode 100755
index 210d733b7..7525828d2
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/anet_pipeline.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/anet_pipeline.py
@@ -108,9 +108,8 @@ def __call__(self, results):
tmp_start = max(min(1, gt["segment"][0] / video_second), 0)
tmp_end = max(min(1, gt["segment"][1] / video_second), 0)
gt_bbox.append([tmp_start, tmp_end])
- tmp_gt_iou_map = self.iou_with_anchors(match_map[:, 0],
- match_map[:, 1], tmp_start,
- tmp_end)
+ tmp_gt_iou_map = self.iou_with_anchors(
+ match_map[:, 0], match_map[:, 1], tmp_start, tmp_end)
tmp_gt_iou_map = np.reshape(tmp_gt_iou_map,
[self.dscale, self.tscale])
gt_iou_map.append(tmp_gt_iou_map)
@@ -130,16 +129,14 @@ def __call__(self, results):
for jdx in range(len(anchor_xmin)):
match_score_start.append(
np.max(
- self.ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx],
- gt_start_bboxs[:, 0],
- gt_start_bboxs[:, 1])))
+ self.ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[
+ jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
match_score_end = []
for jdx in range(len(anchor_xmin)):
match_score_end.append(
np.max(
- self.ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx],
- gt_end_bboxs[:, 0], gt_end_bboxs[:,
- 1])))
+ self.ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[
+ jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
gt_start = np.array(match_score_start)
gt_end = np.array(match_score_end)
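
`iou_with_anchors` in the hunk above is called with anchor start/end vectors and a ground-truth segment. A hedged sketch of what such a function typically computes for 1-D temporal intervals (BMN's usual intersection-over-union definition; this is not copied from PaddleVideo):

```python
# Temporal IoU between a set of anchors and one ground-truth segment.
import numpy as np

def iou_with_anchors(anchors_min, anchors_max, box_min, box_max):
    inter = np.maximum(
        np.minimum(anchors_max, box_max) - np.maximum(anchors_min, box_min), 0.)
    union = (anchors_max - anchors_min) + (box_max - box_min) - inter
    return inter / union

a_min = np.array([0.0, 0.2, 0.4])
a_max = np.array([0.2, 0.4, 0.6])
print(iou_with_anchors(a_min, a_max, 0.1, 0.5))  # [0.2 0.5 0.2]
```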
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/augmentations.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/augmentations.py
old mode 100644
new mode 100755
index 00c99ec35..64aed69f4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/augmentations.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/augmentations.py
@@ -35,6 +35,7 @@ class Scale(object):
do_round(bool): Whether to round up when calculating the zoom ratio. default: False
backend(str): Choose pillow or cv2 as the graphics processing backend. default: 'pillow'
"""
+
def __init__(self,
short_size,
fixed_ratio=True,
@@ -105,12 +106,14 @@ def __call__(self, results):
resized_imgs.append(img.resize((ow, oh), Image.BILINEAR))
elif self.backend == 'cv2' and (self.keep_ratio is not None):
resized_imgs.append(
- cv2.resize(img, (ow, oh), interpolation=cv2.INTER_LINEAR))
+ cv2.resize(
+ img, (ow, oh), interpolation=cv2.INTER_LINEAR))
else:
resized_imgs.append(
Image.fromarray(
- cv2.resize(np.asarray(img), (ow, oh),
- interpolation=cv2.INTER_LINEAR)))
+ cv2.resize(
+ np.asarray(img), (ow, oh),
+ interpolation=cv2.INTER_LINEAR)))
results['imgs'] = resized_imgs
return results
@@ -122,6 +125,7 @@ class RandomCrop(object):
Args:
target_size(int): Random crop a square with the target_size from an image.
"""
+
def __init__(self, target_size):
self.target_size = target_size
@@ -189,8 +193,8 @@ def get_crop_bbox(img_shape,
min_ar, max_ar = aspect_ratio_range
aspect_ratios = np.exp(
- np.random.uniform(np.log(min_ar), np.log(max_ar),
- size=max_attempts))
+ np.random.uniform(
+ np.log(min_ar), np.log(max_ar), size=max_attempts))
target_areas = np.random.uniform(*area_range, size=max_attempts) * area
candidate_crop_w = np.round(np.sqrt(target_areas *
aspect_ratios)).astype(np.int32)
@@ -248,6 +252,7 @@ class CenterCrop(object):
target_size(int): Center crop a square with the target_size from an image.
do_round(bool): Whether to round up the coordinates of the upper left corner of the cropping area. default: True
"""
+
def __init__(self, target_size, do_round=True, backend='pillow'):
self.target_size = target_size
self.do_round = do_round
@@ -281,10 +286,10 @@ def __call__(self, results):
assert (w >= self.target_size) and (h >= self.target_size), \
"image width({}) and height({}) should be larger than crop size".format(
w, h, self.target_size)
- x1 = int(round(
- (w - tw) / 2.0)) if self.do_round else (w - tw) // 2
- y1 = int(round(
- (h - th) / 2.0)) if self.do_round else (h - th) // 2
+ x1 = int(round((w - tw) / 2.0)) if self.do_round else (
+ w - tw) // 2
+ y1 = int(round((h - th) / 2.0)) if self.do_round else (
+ h - th) // 2
if self.backend == 'cv2':
ccrop_imgs.append(img[y1:y1 + th, x1:x1 + tw])
elif self.backend == 'pillow':
@@ -305,6 +310,7 @@ class MultiScaleCrop(object):
allow_duplication(int): Whether to allow duplicate candidate crop starting points.
more_fix_crop(int): Whether to allow more cutting starting points.
"""
+
def __init__(
self,
target_size,  # NOTE: named target_size now, but a short-size value is still passed in!
@@ -408,9 +414,10 @@ def _sample_crop_size(im_size):
else:
ret_img_group = [
Image.fromarray(
- cv2.resize(np.asarray(img),
- dsize=(input_size[0], input_size[1]),
- interpolation=cv2.INTER_LINEAR))
+ cv2.resize(
+ np.asarray(img),
+ dsize=(input_size[0], input_size[1]),
+ interpolation=cv2.INTER_LINEAR))
for img in crop_img_group
]
results['imgs'] = ret_img_group
@@ -424,6 +431,7 @@ class RandomFlip(object):
Args:
p(float): Random flip images with the probability p.
"""
+
def __init__(self, p=0.5):
self.p = p
@@ -460,6 +468,7 @@ class Image2Array(object):
Args:
transpose: whether to transpose or not, default True, False for slowfast.
"""
+
def __init__(self, transpose=True, data_format='tchw'):
assert data_format in [
'tchw', 'cthw'
@@ -505,6 +514,7 @@ class Normalization(object):
std(Sequence[float]): std values of different channels.
tensor_shape(list): size of mean, default [3,1,1]. For slowfast, [1,1,1,3]
"""
+
def __init__(self, mean, std, tensor_shape=[3, 1, 1], inplace=False):
if not isinstance(mean, Sequence):
raise TypeError(
@@ -560,6 +570,7 @@ class JitterScale(object):
min_size: Lower bound for random sampler.
max_size: Higher bound for random sampler.
"""
+
def __init__(self,
min_size,
max_size,
@@ -596,8 +607,8 @@ def __call__(self, results):
height, width = imgs.shape[2:]
else:
width, height = imgs[0].size
- if (width <= height and width == size) or (height <= width
- and height == size):
+ if (width <= height and width == size) or (height <= width and
+ height == size):
return results
new_width = size
@@ -608,10 +619,11 @@ def __call__(self, results):
new_width = int(math.floor((float(width) / height) * size))
if 'backend' in results and results['backend'] == 'pyav':
- frames_resize = F.interpolate(imgs,
- size=(new_height, new_width),
- mode="bilinear",
- align_corners=False) # [c,t,h,w]
+ frames_resize = F.interpolate(
+ imgs,
+ size=(new_height, new_width),
+ mode="bilinear",
+ align_corners=False) # [c,t,h,w]
else:
frames_resize = []
for j in range(len(imgs)):
@@ -631,6 +643,7 @@ class MultiCrop(object):
Args:
target_size(int): Random crop a square with the target_size from an image.
"""
+
def __init__(self,
target_size,
default_crop_size=224,
@@ -710,6 +723,7 @@ class PackOutput(object):
Args:
alpha(int): temporal length of fast/slow
"""
+
def __init__(self, alpha):
self.alpha = alpha
@@ -782,6 +796,7 @@ class TenCrop:
Args:
target_size(int | tuple[int]): (w, h) of target size for crop.
"""
+
def __init__(self, target_size):
self.target_size = (target_size, target_size)
@@ -823,6 +838,7 @@ class UniformCrop:
Args:
target_size(int | tuple[int]): (w, h) of target size for crop.
"""
+
def __init__(self, target_size, backend='cv2'):
if isinstance(target_size, tuple):
self.target_size = target_size
@@ -866,8 +882,8 @@ def __call__(self, results):
img_crops = []
if 'backend' in results and results['backend'] == 'pyav': # [c,t,h,w]
for x_offset, y_offset in offsets:
- crop = imgs[:, :, y_offset:y_offset + crop_h,
- x_offset:x_offset + crop_w]
+ crop = imgs[:, :, y_offset:y_offset + crop_h, x_offset:x_offset
+ + crop_w]
img_crops.append(crop)
img_crops = paddle.concat(img_crops, axis=1)
else:
@@ -881,8 +897,8 @@ def __call__(self, results):
else:
for x_offset, y_offset in offsets:
crop = [
- img[y_offset:y_offset + crop_h,
- x_offset:x_offset + crop_w] for img in imgs
+ img[y_offset:y_offset + crop_h, x_offset:x_offset +
+ crop_w] for img in imgs
]
img_crops.extend(crop)
results['imgs'] = img_crops
@@ -937,6 +953,7 @@ def __call__(self, results):
class ColorJitter(object):
"""Randomly change the brightness, contrast, saturation and hue of an image.
"""
+
def __init__(self,
brightness=0,
contrast=0,
@@ -998,9 +1015,8 @@ def __call__(self, results):
for k in list(imgs):
if "color" in k or "color_n" in k:
n, im, i = k
- imgs[(n, im,
- i)] = imgs[(n, im,
- i)].transpose(Image.FLIP_LEFT_RIGHT)
+ imgs[(n, im, i)] = imgs[(
+ n, im, i)].transpose(Image.FLIP_LEFT_RIGHT)
if "depth_gt" in imgs:
imgs['depth_gt'] = np.array(np.fliplr(imgs['depth_gt']))
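
The `CenterCrop` offsets reformatted above differ between `do_round=True` (Python's banker's rounding of the half-margin) and `do_round=False` (floor division). A small sketch of that off-by-one, with illustrative sizes:

```python
# Sketch of the offset computation from the CenterCrop hunk above.
def center_offsets(w, h, tw, th, do_round=True):
    x1 = int(round((w - tw) / 2.0)) if do_round else (w - tw) // 2
    y1 = int(round((h - th) / 2.0)) if do_round else (h - th) // 2
    return x1, y1

# (243 - 224) / 2 = 9.5: round() gives 10, floor division gives 9.
print(center_offsets(256, 243, 224, 224))         # (16, 10)
print(center_offsets(256, 243, 224, 224, False))  # (16, 9)
```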
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/augmentations_ava.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/augmentations_ava.py
old mode 100644
new mode 100755
index e7cbe3c39..ffa5e06c0
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/augmentations_ava.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/augmentations_ava.py
@@ -37,6 +37,7 @@
'lanczos': cv2.INTER_LANCZOS4
}
+
def _init_lazy_if_proper(results, lazy):
"""Initialize lazy operation properly.
@@ -61,8 +62,8 @@ def _init_lazy_if_proper(results, lazy):
img_h, img_w = results['img_shape']
lazyop = dict()
lazyop['original_shape'] = results['img_shape']
- lazyop['crop_bbox'] = np.array([0, 0, img_w, img_h],
- dtype=np.float32)
+ lazyop['crop_bbox'] = np.array(
+ [0, 0, img_w, img_h], dtype=np.float32)
lazyop['flip'] = False
lazyop['flip_direction'] = None
lazyop['interpolation'] = None
@@ -70,6 +71,7 @@ def _init_lazy_if_proper(results, lazy):
else:
assert 'lazy' not in results, 'Use Fuse after lazy operations'
+
def _scale_size(size, scale):
"""Rescale a size by a ratio.
@@ -83,6 +85,7 @@ def _scale_size(size, scale):
w, h = size
return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)
+
def rescale_size(old_size, scale, return_scale=False):
"""Calculate the new size to be rescaled to.
@@ -119,6 +122,7 @@ def rescale_size(old_size, scale, return_scale=False):
else:
return new_size
+
def imresize(img,
size,
return_scale=False,
@@ -148,6 +152,7 @@ def imresize(img,
h_scale = size[1] / h
return resized_img, w_scale, h_scale
+
@PIPELINES.register()
class EntityBoxRescale:
"""Rescale the entity box and proposals according to the image shape.
@@ -165,11 +170,11 @@ def __init__(self, scale_factor):
def __call__(self, results):
scale_factor = np.concatenate([self.scale_factor, self.scale_factor])
-
+
if 'gt_bboxes' in results:
gt_bboxes = results['gt_bboxes']
results['gt_bboxes'] = gt_bboxes * scale_factor
-
+
if 'proposals' in results:
proposals = results['proposals']
if proposals is not None:
@@ -183,6 +188,7 @@ def __call__(self, results):
def __repr__(self):
return f'{self.__class__.__name__}(scale_factor={self.scale_factor})'
+
@PIPELINES.register()
class EntityBoxCrop:
"""Crop the entity boxes and proposals according to the cropped images.
@@ -217,14 +223,17 @@ def __call__(self, results):
if proposals is not None:
assert proposals.shape[-1] == 4
proposals_ = proposals.copy()
- proposals_[..., 0::2] = np.clip(proposals[..., 0::2] - x1, 0, img_w - 1)
- proposals_[..., 1::2] = np.clip(proposals[..., 1::2] - y1, 0, img_h - 1)
+ proposals_[..., 0::2] = np.clip(proposals[..., 0::2] - x1, 0,
+ img_w - 1)
+ proposals_[..., 1::2] = np.clip(proposals[..., 1::2] - y1, 0,
+ img_h - 1)
results['proposals'] = proposals_
return results
def __repr__(self):
return f'{self.__class__.__name__}(crop_bbox={self.crop_bbox})'
+
@PIPELINES.register()
class EntityBoxFlip:
"""Flip the entity boxes and proposals with a probability.
@@ -335,14 +344,13 @@ def __call__(self, results):
else:
new_w, new_h = self.scale
- self.scale_factor = np.array([new_w / img_w, new_h / img_h],
- dtype=np.float32)
+ self.scale_factor = np.array(
+ [new_w / img_w, new_h / img_h], dtype=np.float32)
results['img_shape'] = (new_h, new_w)
results['keep_ratio'] = self.keep_ratio
results['scale_factor'] = results['scale_factor'] * self.scale_factor
-
- if not self.lazy:
+ if not self.lazy:
results['imgs'] = [
imresize(
img, (new_w, new_h), interpolation=self.interpolation)
@@ -368,6 +376,7 @@ def __repr__(self):
f'lazy={self.lazy})')
return repr_str
+
@PIPELINES.register()
class RandomRescale:
"""Randomly resize images so that the short_edge is resized to a specific
@@ -377,7 +386,7 @@ class RandomRescale:
def __init__(self, scale_range, interpolation='bilinear'):
scale_range = eval(scale_range)
self.scale_range = scale_range
-
+
assert len(scale_range) == 2
assert scale_range[0] < scale_range[1]
assert np.all([x > 0 for x in scale_range])
@@ -394,10 +403,11 @@ def __call__(self, results):
"""
short_edge = np.random.randint(self.scale_range[0],
self.scale_range[1] + 1)
- resize = Resize((-1, short_edge),
- keep_ratio=True,
- interpolation=self.interpolation,
- lazy=False)
+ resize = Resize(
+ (-1, short_edge),
+ keep_ratio=True,
+ interpolation=self.interpolation,
+ lazy=False)
results = resize(results)
results['short_edge'] = short_edge
@@ -410,6 +420,7 @@ def __repr__(self):
f'interpolation={self.interpolation})')
return repr_str
+
@PIPELINES.register()
class Rescale:
"""resize images so that the short_edge is resized to a specific
@@ -440,10 +451,11 @@ def __call__(self, results):
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
- resize = Resize(self.scale_range,
- keep_ratio=True,
- interpolation=self.interpolation,
- lazy=False)
+ resize = Resize(
+ self.scale_range,
+ keep_ratio=True,
+ interpolation=self.interpolation,
+ lazy=False)
results = resize(results)
return results
@@ -508,11 +520,13 @@ def __call__(self, results):
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
h_ratio * old_x_ratio
]
- results['crop_quadruple'] = np.array( new_crop_quadruple, dtype=np.float32)
+ results['crop_quadruple'] = np.array(
+ new_crop_quadruple, dtype=np.float32)
new_h, new_w = self.size, self.size
- results['crop_bbox'] = np.array( [x_offset, y_offset, x_offset + new_w, y_offset + new_h])
+ results['crop_bbox'] = np.array(
+ [x_offset, y_offset, x_offset + new_w, y_offset + new_h])
results['img_shape'] = (new_h, new_w)
if not self.lazy:
@@ -531,11 +545,10 @@ def __call__(self, results):
right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
top = y_offset * (lazy_bottom - lazy_top) / img_h
bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
- lazyop['crop_bbox'] = np.array([(lazy_left + left),
- (lazy_top + top),
- (lazy_left + right),
- (lazy_top + bottom)],
- dtype=np.float32)
+ lazyop['crop_bbox'] = np.array(
+ [(lazy_left + left), (lazy_top + top), (lazy_left + right),
+ (lazy_top + bottom)],
+ dtype=np.float32)
# Process entity boxes
if 'gt_bboxes' in results:
@@ -550,6 +563,7 @@ def __repr__(self):
f'lazy={self.lazy})')
return repr_str
+
def imflip_(img, direction='horizontal'):
"""Inplace flip an image horizontally or vertically.
@@ -569,6 +583,7 @@ def imflip_(img, direction='horizontal'):
else:
return cv2.flip(img, -1, img)
+
def iminvert(img):
"""Invert (negate) an image.
@@ -580,6 +595,7 @@ def iminvert(img):
"""
return np.full_like(img, 255) - img
+
@PIPELINES.register()
class Flip:
"""Flip the input images with a probability.
@@ -649,6 +665,7 @@ def __repr__(self):
f'lazy={self.lazy})')
return repr_str
+
def imnormalize_(img, mean, std, to_rgb=True):
"""Inplace normalize an image with mean and std.
@@ -671,6 +688,7 @@ def imnormalize_(img, mean, std, to_rgb=True):
cv2.multiply(img, stdinv, img) # inplace
return img
+
@PIPELINES.register()
class Normalize:
"""Normalize images with the given mean and std value.
@@ -691,8 +709,7 @@ class Normalize:
def __init__(self, mean, std, to_bgr=False, adjust_magnitude=False):
if not isinstance(mean, Sequence):
raise TypeError(
- f'Mean must be list, tuple or np.ndarray, but got {type(mean)}'
- )
+ f'Mean must be list, tuple or np.ndarray, but got {type(mean)}')
if not isinstance(std, Sequence):
raise TypeError(
@@ -716,7 +733,7 @@ def __call__(self, results):
results['imgs'] = imgs
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_bgr=self.to_bgr)
-
+
return results
def __repr__(self):
@@ -726,5 +743,3 @@ def __repr__(self):
f'to_bgr={self.to_bgr}, '
f'adjust_magnitude={self.adjust_magnitude})')
return repr_str
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/compose.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/compose.py
old mode 100644
new mode 100755
index 76eb4ed4d..8fbd1f737
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/compose.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/compose.py
@@ -41,6 +41,7 @@ class Compose(object):
A compose object which is callable, __call__ for this Compose
    object will call each given :attr:`transforms` sequentially.
"""
+
def __init__(self, pipelines):
#assert isinstance(pipelines, Sequence)
self.pipelines = []
@@ -54,8 +55,8 @@ def __init__(self, pipelines):
temp_dict = dict(name=list(t.keys())[0])
for all_sub_t in t.values():
if all_sub_t is not None:
- temp_dict.update(all_sub_t)
-
+ temp_dict.update(all_sub_t)
+
t = build(temp_dict, PIPELINES)
self.pipelines.append(t)
elif callable(p):
@@ -63,6 +64,7 @@ def __init__(self, pipelines):
else:
            raise TypeError(f'pipelines must be callable or a dict, '
f'but got {type(p)}')
+
def __call__(self, data):
for p in self.pipelines:
try:
@@ -71,6 +73,6 @@ def __call__(self, data):
stack_info = traceback.format_exc()
logger = get_logger("paddlevideo")
logger.info("fail to perform transform [{}] with error: "
- "{} and stack:\n{}".format(p, e, str(stack_info)))
+ "{} and stack:\n{}".format(p, e, str(stack_info)))
raise e
return data
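
For orientation, the `Compose` class patched above follows the usual transform-chaining pattern: each pipeline stage is a callable that consumes and returns a shared `results` dict. A minimal, self-contained sketch of that pattern (`MiniCompose` is an illustrative name, not the PaddleVideo class):

```python
# A minimal sketch of the Compose pattern: every stage is a callable that
# takes and returns a `results` dict, so stages can be chained freely.
class MiniCompose:
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, results):
        for transform in self.transforms:
            results = transform(results)
        return results


# Usage: two toy stages over a dict, mirroring how pipelines pass state.
pipeline = MiniCompose([
    lambda r: {**r, 'imgs': [x * 2 for x in r['imgs']]},
    lambda r: {**r, 'frames_len': len(r['imgs'])},
])
print(pipeline({'imgs': [1, 2, 3]}))  # {'imgs': [2, 4, 6], 'frames_len': 3}
```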
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode.py
old mode 100644
new mode 100755
index 478ea04e5..6db01aa62
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode.py
@@ -41,6 +41,7 @@ class VideoDecoder(object):
Args:
filepath: the file path of mp4 file
"""
+
def __init__(self,
backend='cv2',
mode='train',
@@ -112,9 +113,8 @@ def __call__(self, results):
else:
decode_all_video = False
start_idx, end_idx = get_start_end_idx(
- frames_length,
- self.sampling_rate * self.num_seg / self.target_fps * fps,
- clip_idx, num_clips)
+ frames_length, self.sampling_rate * self.num_seg /
+ self.target_fps * fps, clip_idx, num_clips)
timebase = duration / frames_length
video_start_pts = int(start_idx * timebase)
video_end_pts = int(end_idx * timebase)
@@ -125,10 +125,11 @@ def __call__(self, results):
margin = 1024
seek_offset = max(video_start_pts - margin, 0)
- container.seek(seek_offset,
- any_frame=False,
- backward=True,
- stream=container.streams.video[0])
+ container.seek(
+ seek_offset,
+ any_frame=False,
+ backward=True,
+ stream=container.streams.video[0])
tmp_frames = {}
buffer_count = 0
max_pts = 0
@@ -147,7 +148,9 @@ def __call__(self, results):
container.close()
- frames = [frame.to_rgb().to_ndarray() for frame in video_frames]
+ frames = [
+ frame.to_rgb().to_ndarray() for frame in video_frames
+ ]
clip_sz = self.sampling_rate * self.num_seg / self.target_fps * fps
start_idx, end_idx = get_start_end_idx(
@@ -170,6 +173,7 @@ def __call__(self, results):
class FrameDecoder(object):
"""just parse results
"""
+
def __init__(self):
pass
@@ -182,6 +186,7 @@ def __call__(self, results):
class MRIDecoder(object):
"""just parse results
"""
+
def __init__(self):
pass
@@ -195,6 +200,7 @@ class FeatureDecoder(object):
"""
    Perform feature decoding operations, e.g. for YouTube-8M.
"""
+
def __init__(self, num_classes, max_len=512, has_label=True):
self.max_len = max_len
self.num_classes = num_classes
@@ -218,8 +224,8 @@ def __call__(self, results):
b'nframes']
rgb = record['feature'].astype(
float) if 'feature' in record else record[b'feature'].astype(float)
- audio = record['audio'].astype(
- float) if 'audio' in record else record[b'audio'].astype(float)
+ audio = record['audio'].astype(float) if 'audio' in record else record[
+ b'audio'].astype(float)
if self.has_label:
label = record['label'] if 'label' in record else record[b'label']
one_hot_label = self.make_one_hot(label, self.num_classes)
@@ -227,12 +233,10 @@ def __call__(self, results):
rgb = rgb[0:nframes, :]
audio = audio[0:nframes, :]
- rgb = self.dequantize(rgb,
- max_quantized_value=2.,
- min_quantized_value=-2.)
- audio = self.dequantize(audio,
- max_quantized_value=2,
- min_quantized_value=-2)
+ rgb = self.dequantize(
+ rgb, max_quantized_value=2., min_quantized_value=-2.)
+ audio = self.dequantize(
+ audio, max_quantized_value=2, min_quantized_value=-2)
if self.has_label:
results['labels'] = one_hot_label.astype("float32")
@@ -249,15 +253,16 @@ def __call__(self, results):
feat = vitem[vi]
results[prefix + 'len'] = feat.shape[0]
#feat pad step 1. padding
- feat_add = np.zeros((self.max_len - feat.shape[0], feat.shape[1]),
- dtype=np.float32)
+ feat_add = np.zeros(
+ (self.max_len - feat.shape[0], feat.shape[1]),
+ dtype=np.float32)
feat_pad = np.concatenate((feat, feat_add), axis=0)
results[prefix + 'data'] = feat_pad.astype("float32")
#feat pad step 2. mask
feat_mask_origin = np.ones(feat.shape, dtype=np.float32)
feat_mask_add = feat_add
- feat_mask = np.concatenate((feat_mask_origin, feat_mask_add),
- axis=0)
+ feat_mask = np.concatenate(
+ (feat_mask_origin, feat_mask_add), axis=0)
results[prefix + 'mask'] = feat_mask.astype("float32")
return results
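
The `FeatureDecoder.dequantize` calls reflowed above undo the uint8 quantization used for YouTube-8M-style features. Assuming the usual linear scheme (values quantized into [min, max] over 255 steps), the mapping looks like this sketch:

```python
import numpy as np


def dequantize(feat, max_quantized_value=2., min_quantized_value=-2.):
    # Linear dequantization sketch: uint8 values in [0, 255] are mapped
    # back onto the float range [min_quantized_value, max_quantized_value].
    scalar = (max_quantized_value - min_quantized_value) / 255.0
    return feat * scalar + min_quantized_value


rgb = np.array([0., 127., 255.])
print(dequantize(rgb))  # [-2.         -0.00784314  2.        ]
```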
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_image.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_image.py
old mode 100644
new mode 100755
index dc86abab0..a4eeac55d
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_image.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_image.py
@@ -26,6 +26,7 @@
class ImageDecoder(object):
"""Decode Image
"""
+
def __init__(self,
dataset,
frame_idxs,
@@ -58,10 +59,9 @@ def get_image_path(self, dataset, folder, frame_index, side):
image_path = os.path.join(self.data_path, folder, f_str)
elif dataset == "kitti_odom":
f_str = "{:06d}{}".format(frame_index, self.img_ext)
- image_path = os.path.join(self.data_path,
- "sequences/{:02d}".format(int(folder)),
- "image_{}".format(self.side_map[side]),
- f_str)
+ image_path = os.path.join(
+ self.data_path, "sequences/{:02d}".format(int(folder)),
+ "image_{}".format(self.side_map[side]), f_str)
elif dataset == "kitti_depth":
f_str = "{:010d}{}".format(frame_index, self.img_ext)
image_path = os.path.join(
@@ -97,11 +97,12 @@ def get_depth(self, dataset, folder, frame_index, side):
depth_gt = depth_gt[160:960 - 160, :]
- depth_gt = skimage.transform.resize(depth_gt,
- self.full_res_shape[::-1],
- order=0,
- preserve_range=True,
- mode='constant')
+ depth_gt = skimage.transform.resize(
+ depth_gt,
+ self.full_res_shape[::-1],
+ order=0,
+ preserve_range=True,
+ mode='constant')
return depth_gt
@@ -159,9 +160,8 @@ def __call__(self, results):
other_side = {"r": "l", "l": "r"}[side]
imgs[("color", i,
-1)] = self.get_color(folder, frame_index, other_side)
- imgs[("color_n", i,
- -1)] = self.get_color(folder2, frame_index,
- other_side)
+ imgs[("color_n", i, -1)] = self.get_color(
+ folder2, frame_index, other_side)
else:
imgs[("color", i,
-1)] = self.get_color(folder, frame_index + i, side)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_sampler.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_sampler.py
old mode 100644
new mode 100755
index 2f8f8743d..137678303
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_sampler.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_sampler.py
@@ -30,6 +30,7 @@ class DecodeSampler(object):
target_fps(int): desired fps, default 30
        test_mode(bool): whether in test mode rather than train/valid. In SlowFast, multi-crop sampling is used at test time.
"""
+
def __init__(self,
num_frames,
sampling_rate,
@@ -75,10 +76,10 @@ def __call__(self, results):
fps = vr.get_avg_fps()
clip_size = self.num_frames * self.sampling_rate * fps / self.target_fps
- start_idx, end_idx = self.get_start_end_idx(videolen, clip_size,
- temporal_sample_index,
- temporal_num_clips)
- index = np.linspace(start_idx, end_idx, self.num_frames).astype("int64")
+ start_idx, end_idx = self.get_start_end_idx(
+ videolen, clip_size, temporal_sample_index, temporal_num_clips)
+ index = np.linspace(start_idx, end_idx,
+ self.num_frames).astype("int64")
index = np.clip(index, 0, videolen)
frames_select = vr.get_batch(index) #1 for buffer
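
The `get_start_end_idx` helper used above places a clip window of `clip_size` frames: uniformly at random for training (`clip_idx == -1`), evenly spaced for multi-clip testing. A sketch under that SlowFast-style assumption:

```python
import random


def get_start_end_idx(video_size, clip_size, clip_idx, num_clips):
    # Sketch of SlowFast-style clip placement: random window in training,
    # evenly spaced windows across the video in multi-clip testing.
    delta = max(video_size - clip_size, 0)
    if clip_idx == -1:  # training: sample the window start uniformly
        start_idx = random.uniform(0, delta)
    else:  # testing: the clip_idx-th of num_clips evenly spaced windows
        start_idx = delta * clip_idx / num_clips
    return start_idx, start_idx + clip_size - 1


print(get_start_end_idx(300, 64, 0, 10))  # (0.0, 63.0)
print(get_start_end_idx(300, 64, 9, 10))  # (212.4, 275.4)
```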
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_sampler_MRI.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_sampler_MRI.py
old mode 100644
new mode 100755
index 54189041a..502063ec3
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_sampler_MRI.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/decode_sampler_MRI.py
@@ -36,6 +36,7 @@ class SFMRI_DecodeSampler(object):
Returns:
        frames_idx: the indices of the sampled frames.
"""
+
def __init__(self,
num_seg,
seg_len,
@@ -116,10 +117,8 @@ def __call__(self, results):
sample_pos = max(1, 1 + frames_len - 64)
t_stride1 = 64 // self.num_seg[0]
t_stride2 = 64 // self.num_seg[1]
- start_list = np.linspace(0,
- sample_pos - 1,
- num=10,
- dtype=int)
+ start_list = np.linspace(
+ 0, sample_pos - 1, num=10, dtype=int)
offsets_s = []
offsets_f = []
for start_idx in start_list.tolist():
@@ -139,7 +138,8 @@ def __call__(self, results):
idx = 0
if not self.valid_mode:
if average_dur1 >= self.seg_len:
- idx = random.randint(0, average_dur1 - self.seg_len)
+ idx = random.randint(0,
+ average_dur1 - self.seg_len)
idx += i * average_dur1
elif average_dur1 >= 1:
idx += i * average_dur1
@@ -160,7 +160,8 @@ def __call__(self, results):
idx = 0
if not self.valid_mode:
if average_dur2 >= self.seg_len:
- idx = random.randint(0, average_dur2 - self.seg_len)
+ idx = random.randint(0,
+ average_dur2 - self.seg_len)
idx += i * average_dur2
elif average_dur2 >= 1:
idx += i * average_dur2
@@ -182,18 +183,22 @@ def __call__(self, results):
else: # for TSM
if not self.valid_mode:
if average_dur2 > 0:
- offsets_s = np.multiply(list(range(
- self.num_seg[0])), average_dur1) + np.random.randint(
+ offsets_s = np.multiply(
+ list(range(self.num_seg[0])),
+ average_dur1) + np.random.randint(
average_dur1, size=self.num_seg[0])
- offsets_f = np.multiply(list(range(
- self.num_seg[1])), average_dur2) + np.random.randint(
+ offsets_f = np.multiply(
+ list(range(self.num_seg[1])),
+ average_dur2) + np.random.randint(
average_dur2, size=self.num_seg[1])
elif frames_len > self.num_seg[1]:
offsets_s = np.sort(
- np.random.randint(frames_len, size=self.num_seg[0]))
+ np.random.randint(
+ frames_len, size=self.num_seg[0]))
offsets_f = np.sort(
- np.random.randint(frames_len, size=self.num_seg[1]))
+ np.random.randint(
+ frames_len, size=self.num_seg[1]))
else:
offsets_s = np.zeros(shape=(self.num_seg[0], ))
offsets_f = np.zeros(shape=(self.num_seg[1], ))
@@ -201,13 +206,13 @@ def __call__(self, results):
if frames_len > self.num_seg[1]:
average_dur_float_s = frames_len / self.num_seg[0]
offsets_s = np.array([
- int(average_dur_float_s / 2.0 + average_dur_float_s * x)
- for x in range(self.num_seg[0])
+ int(average_dur_float_s / 2.0 + average_dur_float_s *
+ x) for x in range(self.num_seg[0])
])
average_dur_float_f = frames_len / self.num_seg[1]
offsets_f = np.array([
- int(average_dur_float_f / 2.0 + average_dur_float_f * x)
- for x in range(self.num_seg[1])
+ int(average_dur_float_f / 2.0 + average_dur_float_f *
+ x) for x in range(self.num_seg[1])
])
else:
offsets_s = np.zeros(shape=(self.num_seg[0], ))
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/mix.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/mix.py
old mode 100644
new mode 100755
index ccc5f98cf..6fda93559
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/mix.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/mix.py
@@ -24,6 +24,7 @@ class Mixup(object):
Args:
        alpha(float): parameter of the Beta(alpha, alpha) distribution used to sample the mixing ratio.
"""
+
def __init__(self, alpha=0.2):
assert alpha > 0., \
            'parameter alpha[%f] should be > 0.0' % (alpha)
@@ -47,6 +48,7 @@ class Cutmix(object):
Args:
        alpha(float): parameter of the Beta(alpha, alpha) distribution used to sample the mixing ratio.
"""
+
def __init__(self, alpha=0.2):
assert alpha > 0., \
            'parameter alpha[%f] should be > 0.0' % (alpha)
@@ -98,6 +100,7 @@ class VideoMix(object):
mixup_alpha(float): alpha for mixup aug
cutmix_alpha(float): alpha for cutmix aug
"""
+
def __init__(self, cutmix_prob=0.5, mixup_alpha=0.2, cutmix_alpha=1.0):
assert cutmix_prob > 0., \
            'parameter cutmix_prob[%f] should be > 0.0' % (cutmix_prob)
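
The `Mixup`/`Cutmix` operators above both draw a mixing ratio from a Beta distribution and blend pairs of samples within a batch. A self-contained numpy sketch of standard Mixup (illustrative only, not the registered PaddleVideo operator itself):

```python
import numpy as np


def mixup(imgs, labels, alpha=0.2):
    # Standard Mixup sketch: lam ~ Beta(alpha, alpha); blend each sample
    # with a randomly permuted partner. The loss is then computed as
    # lam * loss(pred, labels) + (1 - lam) * loss(pred, labels[perm]).
    lam = np.random.beta(alpha, alpha)
    perm = np.random.permutation(len(imgs))
    mixed = lam * imgs + (1 - lam) * imgs[perm]
    return mixed, labels, labels[perm], lam


batch = np.random.rand(4, 3, 8, 8).astype('float32')
mixed, y_a, y_b, lam = mixup(batch, np.array([0, 1, 2, 3]))
print(mixed.shape, 0. < lam < 1.)  # (4, 3, 8, 8) True
```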
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/multimodal.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/multimodal.py
old mode 100644
new mode 100755
index ccbb1ca9d..57243139a
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/multimodal.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/multimodal.py
@@ -28,6 +28,7 @@ class FeaturePadding(object):
"""
    Pad features to the target shape.
"""
+
def __init__(self, max_region_num=36, max_action_num=5):
self.max_region_num = max_region_num
self.max_action_num = max_action_num
@@ -46,7 +47,8 @@ def __call__(self, results):
image_target = np.zeros((self.max_region_num, 1601), dtype=np.float32)
image_location = np.zeros((self.max_region_num, 5), dtype=np.float32)
- action_feature = np.zeros((self.max_action_num, 2048), dtype=np.float32)
+ action_feature = np.zeros(
+ (self.max_action_num, 2048), dtype=np.float32)
action_target = np.zeros((self.max_action_num, ), dtype=np.int64)
num_boxes = int(num_boxes)
@@ -54,9 +56,10 @@ def __call__(self, results):
image_target[:num_boxes] = image_target_wp
image_location[:num_boxes, :4] = image_location_wp
- image_location[:, 4] = (image_location[:, 3] - image_location[:, 1]) * (
- image_location[:, 2] - image_location[:, 0]) / (float(image_w) *
- float(image_h))
+ image_location[:, 4] = (
+ image_location[:, 3] - image_location[:, 1]) * (
+ image_location[:, 2] - image_location[:, 0]) / (
+ float(image_w) * float(image_h))
image_location[:, 0] = image_location[:, 0] / float(image_w)
image_location[:, 1] = image_location[:, 1] / float(image_h)
@@ -72,15 +75,16 @@ def __call__(self, results):
action_feature = copy.deepcopy(action_feature)
action_target = copy.deepcopy(action_target)
- results = dict(image_feat=image_feature,
- image_target=image_target,
- caption=caption,
- image_loc=image_location,
- num_boxes=int(num_boxes),
- action_feat=action_feature,
- action_target=action_target,
- num_actions=int(num_actions),
- tokenizer=tokenizer)
+ results = dict(
+ image_feat=image_feature,
+ image_target=image_target,
+ caption=caption,
+ image_loc=image_location,
+ num_boxes=int(num_boxes),
+ action_feat=action_feature,
+ action_target=action_target,
+ num_actions=int(num_actions),
+ tokenizer=tokenizer)
return results
@@ -150,15 +154,17 @@ def __init__(self,
self.max_region_length = max_region_length
def get_image_global_feature(self, image_feat, image_loc, image_mask):
- g_image_feat = np.sum(image_feat, axis=0) / np.sum(
- image_mask, axis=0, keepdims=True)
+ g_image_feat = np.sum(image_feat, axis=0) / np.sum(image_mask,
+ axis=0,
+ keepdims=True)
image_feat = np.concatenate(
- [np.expand_dims(g_image_feat, axis=0), image_feat],
- axis=0).astype("float32")
+ [np.expand_dims(
+ g_image_feat, axis=0), image_feat], axis=0).astype("float32")
g_image_loc = np.array([0, 0, 1, 1, 1]).astype("float32")
image_loc = np.concatenate(
- [np.expand_dims(g_image_loc, axis=0), image_loc], axis=0)
+ [np.expand_dims(
+ g_image_loc, axis=0), image_loc], axis=0)
g_image_mask = np.array([1])
image_mask = np.concatenate([g_image_mask, image_mask], axis=0)
@@ -205,8 +211,7 @@ def random_word(self, tokens, tokenizer):
#tok = random.choice(list(tokenizer.vocab.items()))[0]
tok = tokenizer.vocab.idx_to_token[random.randint(
0,
- tokenizer.vocab_size,
- )]
+ tokenizer.vocab_size, )]
tokens[i] = tok
            # remaining 10%: keep the current token unchanged
@@ -286,9 +291,8 @@ def __call__(self, results):
image_feat, image_loc, image_label = self.random_region(
image_feat, image_loc, num_boxes)
- action_feat, action_label = self.random_action(action_feat,
- action_target,
- num_actions)
+ action_feat, action_label = self.random_action(
+ action_feat, action_target, num_actions)
# concatenate lm labels and account for CLS, SEP, SEP
lm_label_ids = [-1] + caption_label + [-1]
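
The `random_word` hunk above implements BERT-style masked-language-model corruption: roughly 15% of tokens are selected, of which about 80% become `[MASK]`, 10% become a random vocabulary token, and 10% stay unchanged. A hypothetical sketch of that scheme (`mask_tokens` and its arguments are illustrative names, not the PaddleVideo API):

```python
import random


def mask_tokens(tokens, vocab, mask_token='[MASK]', prob=0.15):
    # BERT-style masking sketch: pick each token with prob 15%, then
    # replace it with [MASK] 80% of the time, a random vocab token 10%,
    # and leave it unchanged the remaining 10%.
    out, labels = list(tokens), []
    for i, tok in enumerate(tokens):
        if random.random() < prob:
            dice = random.random()
            if dice < 0.8:
                out[i] = mask_token
            elif dice < 0.9:
                out[i] = random.choice(vocab)
            labels.append(tok)  # the model must predict the original token
        else:
            labels.append(-1)  # -1 marks positions ignored by the loss
    return out, labels


print(mask_tokens(['the', 'football', 'match'], ['the', 'a', 'goal']))
```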
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/sample.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/sample.py
old mode 100644
new mode 100755
index 3dfead73a..51e00b27a
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/sample.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/sample.py
@@ -43,6 +43,7 @@ class Sampler(object):
Returns:
        frames_idx: the indices of the sampled frames.
"""
+
def __init__(self,
num_seg,
seg_len,
@@ -69,8 +70,8 @@ def _get(self, frames_idx, results):
imgs = []
for idx in frames_idx:
img = Image.open(
- os.path.join(frame_dir,
- results['suffix'].format(idx))).convert('RGB')
+ os.path.join(frame_dir, results['suffix'].format(
+ idx))).convert('RGB')
imgs.append(img)
elif data_format == "MRI":
@@ -128,12 +129,12 @@ def _get_train_clips(self, num_frames):
if avg_interval > 0:
base_offsets = np.arange(self.num_seg) * avg_interval
- clip_offsets = base_offsets + np.random.randint(avg_interval,
- size=self.num_seg)
+ clip_offsets = base_offsets + np.random.randint(
+ avg_interval, size=self.num_seg)
elif num_frames > max(self.num_seg, ori_seg_len):
clip_offsets = np.sort(
- np.random.randint(num_frames - ori_seg_len + 1,
- size=self.num_seg))
+ np.random.randint(
+ num_frames - ori_seg_len + 1, size=self.num_seg))
elif avg_interval == 0:
ratio = (num_frames - ori_seg_len + 1.0) / self.num_seg
clip_offsets = np.around(np.arange(self.num_seg) * ratio)
@@ -167,8 +168,8 @@ def __call__(self, results):
else:
offsets = self._get_test_clips(frames_len)
- offsets = offsets[:, None] + np.arange(
- self.seg_len)[None, :] * self.frame_interval
+ offsets = offsets[:, None] + np.arange(self.seg_len)[
+ None, :] * self.frame_interval
offsets = np.concatenate(offsets)
offsets = offsets.reshape((-1, self.seg_len))
@@ -218,10 +219,8 @@ def __call__(self, results):
else:
sample_pos = max(1, 1 + frames_len - 64)
t_stride = 64 // self.num_seg
- start_list = np.linspace(0,
- sample_pos - 1,
- num=10,
- dtype=int)
+ start_list = np.linspace(
+ 0, sample_pos - 1, num=10, dtype=int)
offsets = []
for start_idx in start_list.tolist():
offsets += [
@@ -263,12 +262,14 @@ def __call__(self, results):
else: # for TSM
if not self.valid_mode:
if average_dur > 0:
- offsets = np.multiply(list(range(self.num_seg)),
- average_dur) + np.random.randint(
- average_dur, size=self.num_seg)
+ offsets = np.multiply(
+ list(range(self.num_seg)),
+ average_dur) + np.random.randint(
+ average_dur, size=self.num_seg)
elif frames_len > self.num_seg:
offsets = np.sort(
- np.random.randint(frames_len, size=self.num_seg))
+ np.random.randint(
+ frames_len, size=self.num_seg))
else:
offsets = np.zeros(shape=(self.num_seg, ))
else:
@@ -308,6 +309,7 @@ class SamplerPkl(object):
Returns:
        frames_idx: the indices of the sampled frames.
"""
+
def __init__(self, num_seg, seg_len, backend='pillow', valid_mode=False):
self.num_seg = num_seg
self.seg_len = seg_len
@@ -339,7 +341,8 @@ def __call__(self, results):
elif len(label) == 1:
results['labels'] = int(label[0])
else:
- results['labels'] = int(label[0]) if random.random() < 0.5 else int(label[1])
+ results['labels'] = int(label[0]) if random.random(
+ ) < 0.5 else int(label[1])
results['frames_len'] = len(frames)
#results['labels'] = label
frames_len = results['frames_len']
@@ -371,4 +374,4 @@ def __call__(self, results):
results['backend'] = self.backend
results['imgs'] = imgs
- return results
\ No newline at end of file
+ return results
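
The `Sampler` branches reformatted above implement TSN-style segment sampling: split the video into `num_seg` equal chunks, then take one frame index per chunk, random within the chunk for training and the chunk centre for validation. A sketch of just that logic:

```python
import numpy as np


def sample_offsets(frames_len, num_seg, valid_mode=False):
    # TSN-style sampling sketch: one index per equal-length segment.
    average_dur = frames_len // num_seg
    if not valid_mode and average_dur > 0:  # training: jitter inside segments
        return np.multiply(range(num_seg), average_dur) + \
            np.random.randint(average_dur, size=num_seg)
    # validation: deterministic segment centres
    step = frames_len / num_seg
    return np.array([int(step / 2.0 + step * i) for i in range(num_seg)])


print(sample_offsets(250, 8, valid_mode=True))
# [ 15  46  78 109 140 171 203 234]
```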
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/sample_ava.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/sample_ava.py
old mode 100644
new mode 100755
index 39e90a216..10acc4697
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/sample_ava.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/sample_ava.py
@@ -96,8 +96,8 @@ def __call__(self, results):
"""Perform the SampleFrames loading. """
total_frames = results['total_frames']
clip_offsets = self._sample_clips(total_frames)
- frame_inds = clip_offsets[:, None] + np.arange(
- self.clip_len)[None, :] * self.frame_interval
+ frame_inds = clip_offsets[:, None] + np.arange(self.clip_len)[
+ None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
@@ -133,6 +133,7 @@ def __repr__(self):
f'test_mode={self.test_mode})')
return repr_str
+
class BaseStorageBackend(metaclass=ABCMeta):
"""Abstract class of storage backends. """
@@ -144,6 +145,7 @@ def get(self, filepath):
def get_text(self, filepath):
pass
+
class HardDiskBackend(BaseStorageBackend):
"""Raw hard disks storage backend."""
@@ -159,12 +161,11 @@ def get_text(self, filepath):
value_buf = f.read()
return value_buf
+
class FileClient:
"""A general file client to access files in different backend. """
- _backends = {
- 'disk': HardDiskBackend,
- }
+ _backends = {'disk': HardDiskBackend, }
def __init__(self, backend='disk', **kwargs):
if backend not in self._backends:
@@ -212,6 +213,7 @@ def get(self, filepath):
def get_text(self, filepath):
return self.client.get_text(filepath)
+
@PIPELINES.register()
class RawFrameDecode:
"""Load and decode frames with given indices. """
@@ -222,7 +224,7 @@ def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs):
self.kwargs = kwargs
self.file_client = None
- def _pillow2array(self,img, flag='color', channel_order='bgr'):
+ def _pillow2array(self, img, flag='color', channel_order='bgr'):
"""Convert a pillow image to numpy array. """
channel_order = channel_order.lower()
@@ -261,7 +263,8 @@ def _pillow2array(self,img, flag='color', channel_order='bgr'):
f'but got {flag}')
return array
- def _imfrombytes(self,content, flag='color', channel_order='bgr'):#, backend=None):
+ def _imfrombytes(self, content, flag='color',
+ channel_order='bgr'): #, backend=None):
"""Read an image from bytes. """
img_np = np.frombuffer(content, np.uint8)
@@ -297,7 +300,7 @@ def __call__(self, results):
for frame_idx in results['frame_inds']:
frame_idx += offset
filepath = osp.join(directory, suffix.format(frame_idx))
-            img_bytes = self.file_client.get(filepath) # read the image file as raw bytes
+            img_bytes = self.file_client.get(filepath)  # read the image file as raw bytes
# Get frame with channel order RGB directly.
cur_frame = self._imfrombytes(img_bytes, channel_order='rgb')
@@ -326,9 +329,9 @@ def __repr__(self):
f'decoding_backend={self.decoding_backend})')
return repr_str
+
@PIPELINES.register()
class SampleAVAFrames(SampleFrames):
-
def __init__(self, clip_len, frame_interval=2, test_mode=False):
super().__init__(clip_len, frame_interval, test_mode=test_mode)
@@ -372,4 +375,3 @@ def __repr__(self):
f'frame_interval={self.frame_interval}, '
f'test_mode={self.test_mode})')
return repr_str
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/segmentation.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/segmentation.py
old mode 100644
new mode 100755
index 247144267..4d5df7a2b
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/segmentation.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/segmentation.py
@@ -83,9 +83,8 @@ def __call__(self, sample):
continue
else:
flagval = cv2.INTER_CUBIC
- tmp = cv2.resize(tmp,
- dsize=(new_w, new_h),
- interpolation=flagval)
+ tmp = cv2.resize(
+ tmp, dsize=(new_w, new_h), interpolation=flagval)
new_sample[elem] = tmp
samples.append(new_sample)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/skeleton_pipeline.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/skeleton_pipeline.py
old mode 100644
new mode 100755
index 27d774c74..693da5ae0
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/skeleton_pipeline.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/skeleton_pipeline.py
@@ -28,6 +28,7 @@ class AutoPadding(object):
window_size: int, temporal size of skeleton feature.
        random_pad: bool, whether to pad at a random temporal offset when frame length < window size. Default: False.
"""
+
def __init__(self, window_size, random_pad=False):
self.window_size = window_size
self.random_pad = random_pad
@@ -49,14 +50,14 @@ def __call__(self, results):
if T == self.window_size:
data_pad = data[:, :self.window_size, :, :]
elif T < self.window_size:
- begin = random.randint(0, self.window_size -
- T) if self.random_pad else 0
+ begin = random.randint(
+ 0, self.window_size - T) if self.random_pad else 0
data_pad = np.zeros((C, self.window_size, V, M))
data_pad[:, begin:begin + T, :, :] = data[:, :T, :, :]
else:
if self.random_pad:
- index = np.random.choice(T, self.window_size,
- replace=False).astype('int64')
+ index = np.random.choice(
+ T, self.window_size, replace=False).astype('int64')
else:
index = np.linspace(0, T, self.window_size).astype("int64")
data_pad = data[:, index, :, :]
@@ -72,6 +73,7 @@ class SkeletonNorm(object):
Args:
        axis: dimensions of the vertex coordinates. 2 for (x,y), 3 for (x,y,z). Default: 2.
"""
+
def __init__(self, axis=2, squeeze=False):
self.axis = axis
self.squeeze = squeeze
@@ -98,6 +100,7 @@ class Iden(object):
"""
Wrapper Pipeline
"""
+
def __init__(self, label_expand=True):
self.label_expand = label_expand
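
`AutoPadding` above normalises variable-length skeleton tensors of shape (C, T, V, M) to a fixed temporal window: zero-pad short clips, evenly subsample long ones. A sketch of the idea (note it indexes up to T - 1 to stay in bounds):

```python
import numpy as np


def auto_pad(data, window_size, random_pad=False):
    # Sketch: pad (C, T, V, M) skeleton data up to window_size frames,
    # or evenly subsample down to it.
    C, T, V, M = data.shape
    if T == window_size:
        return data
    if T < window_size:
        begin = np.random.randint(0, window_size - T + 1) if random_pad else 0
        out = np.zeros((C, window_size, V, M), dtype=data.dtype)
        out[:, begin:begin + T] = data
        return out
    index = np.linspace(0, T - 1, window_size).astype('int64')
    return data[:, index]


x = np.ones((2, 30, 25, 1))
print(auto_pad(x, 50).shape, auto_pad(x, 20).shape)  # (2, 50, 25, 1) (2, 20, 25, 1)
```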
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/registry.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/registry.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ActivityNet/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ActivityNet/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ActivityNet/anet_prop.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ActivityNet/anet_prop.py
old mode 100644
new mode 100755
index 411b164f9..eac9555a5
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ActivityNet/anet_prop.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ActivityNet/anet_prop.py
@@ -166,12 +166,11 @@ def evaluate(self):
print('[RESULTS] Performance on ActivityNet proposal task.')
with open("data/bmn/BMN_Test_results/auc_result.txt",
"a") as text_file:
- text_file.write(
- '\tArea Under the AR vs AN curve: {}% \n'.format(
- 100. * float(area_under_curve) /
- proposals_per_video[-1]))
- print('\tArea Under the AR vs AN curve: {}%'.format(
- 100. * float(area_under_curve) / proposals_per_video[-1]))
+ text_file.write('\tArea Under the AR vs AN curve: {}% \n'.
+ format(100. * float(area_under_curve) /
+ proposals_per_video[-1]))
+ print('\tArea Under the AR vs AN curve: {}%'.format(100. * float(
+ area_under_curve) / proposals_per_video[-1]))
self.recall = recall
self.avg_recall = avg_recall
@@ -206,11 +205,11 @@ def average_recall_vs_avg_nr_proposals(self,
video_lst = ground_truth['video-id'].unique()
if not max_avg_nr_proposals:
- max_avg_nr_proposals = float(
- proposals.shape[0]) / video_lst.shape[0]
+ max_avg_nr_proposals = float(proposals.shape[0]) / video_lst.shape[
+ 0]
- ratio = max_avg_nr_proposals * float(
- video_lst.shape[0]) / proposals.shape[0]
+ ratio = max_avg_nr_proposals * float(video_lst.shape[
+ 0]) / proposals.shape[0]
# Adaptation to query faster
ground_truth_gbvn = ground_truth.groupby('video-id')
@@ -234,9 +233,9 @@ def average_recall_vs_avg_nr_proposals(self,
score_lst.append(np.zeros((n, 1)))
continue
- this_video_proposals = proposals_videoid.loc[:,
- ['t-start', 't-end'
- ]].values
+ this_video_proposals = proposals_videoid.loc[:, [
+ 't-start', 't-end'
+ ]].values
if this_video_proposals.shape[0] == 0:
n = this_video_ground_truth.shape[0]
@@ -248,8 +247,8 @@ def average_recall_vs_avg_nr_proposals(self,
this_video_proposals = this_video_proposals[sort_idx, :]
if this_video_proposals.ndim != 2:
- this_video_proposals = np.expand_dims(this_video_proposals,
- axis=0)
+ this_video_proposals = np.expand_dims(
+ this_video_proposals, axis=0)
if this_video_ground_truth.ndim != 2:
this_video_ground_truth = np.expand_dims(
this_video_ground_truth, axis=0)
@@ -271,8 +270,9 @@ def average_recall_vs_avg_nr_proposals(self,
# retrieved per video.
# Computes average recall.
- pcn_lst = np.arange(1, 101) / 100.0 * (max_avg_nr_proposals * float(
- video_lst.shape[0]) / total_nr_proposals)
+ pcn_lst = np.arange(1, 101) / 100.0 * (max_avg_nr_proposals *
+ float(video_lst.shape[0]) /
+ total_nr_proposals)
matches = np.empty((video_lst.shape[0], pcn_lst.shape[0]))
positives = np.empty(video_lst.shape[0])
recall = np.empty((tiou_thresholds.shape[0], pcn_lst.shape[0]))
@@ -292,8 +292,8 @@ def average_recall_vs_avg_nr_proposals(self,
for j, nr_proposals in enumerate(pcn_proposals):
# Compute the number of matches for each percentage of the proposals
- matches[i, j] = np.count_nonzero(
- (true_positives_tiou[:, :nr_proposals]).sum(axis=1))
+ matches[i, j] = np.count_nonzero((
+ true_positives_tiou[:, :nr_proposals]).sum(axis=1))
# Computes recall given the set of matches per video.
recall[ridx, :] = matches.sum(axis=0) / positives.sum()
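
The AUC reported by `anet_prop.py` above is the area under the average-recall vs. average-number-of-proposals (AR-AN) curve, normalised by the largest AN value. With made-up numbers, the computation mirrors the reflowed code:

```python
import numpy as np

# Toy AR-AN curve: recall saturates as more proposals are kept per video.
proposals_per_video = np.linspace(1, 100, 100)
avg_recall = 1 - np.exp(-proposals_per_video / 30.0)

area_under_curve = np.trapz(avg_recall, proposals_per_video)
print('Area Under the AR vs AN curve: {:.2f}%'.format(
    100. * float(area_under_curve) / proposals_per_video[-1]))
```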
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/__init__.py
old mode 100644
new mode 100755
index c64ba4e0d..c7dcfcd08
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/__init__.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/__init__.py
@@ -29,5 +29,6 @@
__all__ = [
'METRIC', 'build_metric', 'MultiCropMetric', 'BMNMetric',
'CenterCropMetric', 'SkeletonMetric', 'HitOneMetric', 'TransNetV2Metric',
- 'DepthMetric', 'MSRVTTMetric', 'VOSMetric', 'CenterCropMetric_MRI','AVAMetric'
+ 'DepthMetric', 'MSRVTTMetric', 'VOSMetric', 'CenterCropMetric_MRI',
+ 'AVAMetric'
]
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/README.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/metrics.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/metrics.py
old mode 100644
new mode 100755
index 13eb03469..211ae5672
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/metrics.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/metrics.py
@@ -11,8 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-
"""Functions for computing metrics like precision, recall, CorLoc and etc."""
import numpy as np
@@ -36,8 +34,8 @@ def compute_precision_recall(scores, labels, num_gt):
instances. This value is None if no ground truth labels are
present.
"""
- if (not isinstance(labels, np.ndarray) or labels.dtype != np.bool
- or len(labels.shape) != 1):
+ if (not isinstance(labels, np.ndarray) or labels.dtype != np.bool or
+ len(labels.shape) != 1):
raise ValueError('labels must be single dimension bool numpy array')
if not isinstance(scores, np.ndarray) or len(scores.shape) != 1:
@@ -88,8 +86,8 @@ def compute_average_precision(precision, recall):
raise ValueError('If precision is None, recall must also be None')
return np.NAN
- if not isinstance(precision, np.ndarray) or not isinstance(
- recall, np.ndarray):
+ if not isinstance(precision, np.ndarray) or not isinstance(recall,
+ np.ndarray):
raise ValueError('precision and recall must be numpy array')
if precision.dtype != np.float or recall.dtype != np.float:
raise ValueError('input must be float numpy array.')
@@ -138,6 +136,6 @@ def compute_cor_loc(num_gt_imgs_per_class,
"""
# Divide by zero expected for classes with no gt examples.
with np.errstate(divide='ignore', invalid='ignore'):
- return np.where(
- num_gt_imgs_per_class == 0, np.nan,
- num_images_correctly_detected_per_class / num_gt_imgs_per_class)
+ return np.where(num_gt_imgs_per_class == 0, np.nan,
+ num_images_correctly_detected_per_class /
+ num_gt_imgs_per_class)
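
`compute_average_precision` above follows the standard VOC recipe: make precision monotonically non-increasing (the right-to-left envelope), then integrate it over the recall steps. A compact sketch:

```python
import numpy as np


def average_precision(precision, recall):
    # VOC-style AP sketch: right-to-left precision envelope, then sum
    # precision * (recall step) wherever recall increases.
    p = np.concatenate([[0.], precision, [0.]])
    r = np.concatenate([[0.], recall, [1.]])
    for i in range(len(p) - 2, -1, -1):
        p[i] = max(p[i], p[i + 1])
    steps = np.where(r[1:] != r[:-1])[0]
    return np.sum((r[steps + 1] - r[steps]) * p[steps + 1])


print(average_precision(np.array([1.0, 0.5, 0.67]),
                        np.array([0.5, 0.5, 1.0])))  # 0.835
```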
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/np_box_list.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/np_box_list.py
old mode 100644
new mode 100755
index f9b101e6f..1e50a2621
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/np_box_list.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/np_box_list.py
@@ -76,8 +76,7 @@ def add_field(self, field, field_data):
"""
if self.has_field(field):
raise ValueError('Field ' + field + 'already exists')
- if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes(
- ):
+ if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes():
raise ValueError('Invalid dimensions for field data')
self.data[field] = field_data
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/np_box_ops.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/np_box_ops.py
old mode 100644
new mode 100755
index 94e7d300c..2c2333e5a
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/np_box_ops.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/np_box_ops.py
@@ -73,9 +73,9 @@ def iou(boxes1, boxes2):
intersect = intersection(boxes1, boxes2)
area1 = area(boxes1)
area2 = area(boxes2)
- union = (
- np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0) -
- intersect)
+ union = (np.expand_dims(
+ area1, axis=1) + np.expand_dims(
+ area2, axis=0) - intersect)
return intersect / union
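
The `iou` function above divides pairwise intersections by `area1 + area2 - intersection`, broadcast to an [N, M] matrix. An equivalent self-contained sketch for `[ymin, xmin, ymax, xmax]` boxes:

```python
import numpy as np


def pairwise_iou(boxes1, boxes2):
    # IoU sketch: [N, M] matrix of intersection / union for all pairs.
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    tl = np.maximum(boxes1[:, None, :2], boxes2[None, :, :2])  # top-left
    br = np.minimum(boxes1[:, None, 2:], boxes2[None, :, 2:])  # bottom-right
    wh = np.clip(br - tl, 0, None)  # zero out non-overlapping pairs
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter)


a = np.array([[0., 0., 2., 2.]])
b = np.array([[1., 1., 3., 3.], [0., 0., 2., 2.]])
print(pairwise_iou(a, b))  # [[0.14285714 1.        ]]
```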
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/object_detection_evaluation.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/object_detection_evaluation.py
old mode 100644
new mode 100755
index c9f00540f..9d2622575
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/object_detection_evaluation.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/object_detection_evaluation.py
@@ -100,14 +100,13 @@ class ObjectDetectionEvaluator(DetectionEvaluator):
"""A class to evaluate detections."""
def __init__(
- self,
- categories,
- matching_iou_threshold=0.5,
- evaluate_corlocs=False,
- metric_prefix=None,
- use_weighted_mean_ap=False,
- evaluate_masks=False,
- ):
+ self,
+ categories,
+ matching_iou_threshold=0.5,
+ evaluate_corlocs=False,
+ metric_prefix=None,
+ use_weighted_mean_ap=False,
+ evaluate_masks=False, ):
"""Constructor.
Args:
@@ -143,8 +142,7 @@ def __init__(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
- label_id_offset=self._label_id_offset,
- )
+ label_id_offset=self._label_id_offset, )
self._image_ids = set([])
self._evaluate_corlocs = evaluate_corlocs
self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
@@ -176,21 +174,18 @@ def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
dictionary.
"""
if image_id in self._image_ids:
- raise ValueError(
- 'Image with id {} already added.'.format(image_id))
+ raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_classes = (
- groundtruth_dict[
- standard_fields.InputDataFields.groundtruth_classes] -
- self._label_id_offset)
+ groundtruth_dict[standard_fields.InputDataFields.
+ groundtruth_classes] - self._label_id_offset)
# If the key is not present in the groundtruth_dict or the array is
# empty (unless there are no annotations for the groundtruth on this
# image) use values from the dictionary or insert None otherwise.
- if (standard_fields.InputDataFields.groundtruth_difficult
- in groundtruth_dict.keys()) and (groundtruth_dict[
+ if (standard_fields.InputDataFields.groundtruth_difficult in
+ groundtruth_dict.keys()) and (groundtruth_dict[
standard_fields.InputDataFields.groundtruth_difficult].size
- or
- not groundtruth_classes.size):
+ or not groundtruth_classes.size):
groundtruth_difficult = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_difficult]
else:
@@ -212,8 +207,7 @@ def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_class_labels=groundtruth_classes,
groundtruth_is_difficult_list=groundtruth_difficult,
- groundtruth_masks=groundtruth_masks,
- )
+ groundtruth_masks=groundtruth_masks, )
self._image_ids.update([image_id])
def add_single_detected_image_info(self, image_id, detections_dict):
@@ -240,13 +234,12 @@ def add_single_detected_image_info(self, image_id, detections_dict):
ValueError: If detection masks are not in detections dictionary.
"""
detection_classes = (
- detections_dict[
- standard_fields.DetectionResultFields.detection_classes] -
- self._label_id_offset)
+ detections_dict[standard_fields.DetectionResultFields.
+ detection_classes] - self._label_id_offset)
detection_masks = None
if self._evaluate_masks:
- if (standard_fields.DetectionResultFields.detection_masks
- not in detections_dict):
+ if (standard_fields.DetectionResultFields.detection_masks not in
+ detections_dict):
raise ValueError(
'Detection masks not in detections dictionary.')
detection_masks = detections_dict[
@@ -258,8 +251,7 @@ def add_single_detected_image_info(self, image_id, detections_dict):
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores],
detected_class_labels=detection_classes,
- detected_masks=detection_masks,
- )
+ detected_masks=detection_masks, )
def create_category_index(self, categories):
"""Creates dictionary of COCO compatible categories keyed by category
@@ -300,15 +292,13 @@ def evaluate(self):
_,
_,
per_class_corloc,
- mean_corloc,
- ) = self._evaluation.evaluate()
+ mean_corloc, ) = self._evaluation.evaluate()
metric = f'mAP@{self._matching_iou_threshold}IOU'
pascal_metrics = {self._metric_prefix + metric: mean_ap}
if self._evaluate_corlocs:
- pascal_metrics[self._metric_prefix +
- 'Precision/meanCorLoc@{}IOU'.format(
- self._matching_iou_threshold)] = mean_corloc
+ pascal_metrics[self._metric_prefix + 'Precision/meanCorLoc@{}IOU'.
+ format(self._matching_iou_threshold)] = mean_corloc
category_index = self.create_category_index(self._categories)
for idx in range(per_class_ap.size):
if idx + self._label_id_offset in category_index:
@@ -316,18 +306,16 @@ def evaluate(self):
self._metric_prefix +
'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold,
- category_index[idx + self._label_id_offset]['name'],
- ))
+ category_index[idx + self._label_id_offset]['name'], ))
pascal_metrics[display_name] = per_class_ap[idx]
# Optionally add CorLoc metrics.classes
- if self._evaluate_corlocs: #False
+ if self._evaluate_corlocs: #False
display_name = (
self._metric_prefix +
'PerformanceByCategory/CorLoc@{}IOU/{}'.format(
self._matching_iou_threshold,
- category_index[idx +
- self._label_id_offset]['name'],
+ category_index[idx + self._label_id_offset]['name'],
))
pascal_metrics[display_name] = per_class_corloc[idx]
@@ -339,8 +327,7 @@ def clear(self):
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
- label_id_offset=self._label_id_offset,
- )
+ label_id_offset=self._label_id_offset, )
self._image_ids.clear()
@@ -352,8 +339,7 @@ def __init__(self, categories, matching_iou_threshold=0.5):
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
- use_weighted_mean_ap=False,
- )
+ use_weighted_mean_ap=False, )
ObjectDetectionEvalMetrics = collections.namedtuple(
@@ -365,30 +351,27 @@ def __init__(self, categories, matching_iou_threshold=0.5):
'recalls',
'corlocs',
'mean_corloc',
- ],
-)
+ ], )
class ObjectDetectionEvaluation:
"""Internal implementation of Pascal object detection metrics."""
def __init__(
- self,
- num_groundtruth_classes,
- matching_iou_threshold=0.5,
- nms_iou_threshold=1.0,
- nms_max_output_boxes=10000,
- use_weighted_mean_ap=False,
- label_id_offset=0,
- ):
+ self,
+ num_groundtruth_classes,
+ matching_iou_threshold=0.5,
+ nms_iou_threshold=1.0,
+ nms_max_output_boxes=10000,
+ use_weighted_mean_ap=False,
+ label_id_offset=0, ):
if num_groundtruth_classes < 1:
raise ValueError(
'Need at least 1 groundtruth class for evaluation.')
self.per_image_eval = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes=num_groundtruth_classes,
- matching_iou_threshold=matching_iou_threshold,
- )
+ matching_iou_threshold=matching_iou_threshold, )
self.num_class = num_groundtruth_classes
self.use_weighted_mean_ap = use_weighted_mean_ap
self.label_id_offset = label_id_offset
@@ -408,8 +391,7 @@ def _initialize_detections(self):
self.scores_per_class = [[] for _ in range(self.num_class)]
self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
- self.average_precision_per_class = np.empty(
- self.num_class, dtype=float)
+ self.average_precision_per_class = np.empty(self.num_class, dtype=float)
self.average_precision_per_class.fill(np.nan)
self.precisions_per_class = []
self.recalls_per_class = []
@@ -419,14 +401,13 @@ def clear_detections(self):
self._initialize_detections()
def add_single_ground_truth_image_info(
- self,
- image_key,
- groundtruth_boxes,
- groundtruth_class_labels,
- groundtruth_is_difficult_list=None,
- groundtruth_is_group_of_list=None,
- groundtruth_masks=None,
- ):
+ self,
+ image_key,
+ groundtruth_boxes,
+ groundtruth_class_labels,
+ groundtruth_is_difficult_list=None,
+ groundtruth_is_group_of_list=None,
+ groundtruth_masks=None, ):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
@@ -470,17 +451,15 @@ def add_single_ground_truth_image_info(
self._update_ground_truth_statistics(
groundtruth_class_labels,
groundtruth_is_difficult_list.astype(dtype=bool),
- groundtruth_is_group_of_list.astype(dtype=bool),
- )
+ groundtruth_is_group_of_list.astype(dtype=bool), )
def add_single_detected_image_info(
- self,
- image_key,
- detected_boxes,
- detected_scores,
- detected_class_labels,
- detected_masks=None,
- ):
+ self,
+ image_key,
+ detected_boxes,
+ detected_scores,
+ detected_class_labels,
+ detected_masks=None, ):
"""Adds detections for a single image to be used for evaluation.
Args:
@@ -507,8 +486,7 @@ def add_single_detected_image_info(
'detected_class_labels should all have same lengths. Got'
'[%d, %d, %d]' % len(detected_boxes),
len(detected_scores),
- len(detected_class_labels),
- )
+ len(detected_class_labels), )
if image_key in self.detection_keys:
logging.warn(('image %s has already been added to the ground '
@@ -547,8 +525,7 @@ def add_single_detected_image_info(
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
- groundtruth_masks=groundtruth_masks,
- )
+ groundtruth_masks=groundtruth_masks, )
for i in range(self.num_class):
if scores[i].shape[0] > 0:
@@ -556,11 +533,10 @@ def add_single_detected_image_info(
self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
def _update_ground_truth_statistics(
- self,
- groundtruth_class_labels,
- groundtruth_is_difficult_list,
- groundtruth_is_group_of_list,
- ):
+ self,
+ groundtruth_class_labels,
+ groundtruth_is_difficult_list,
+ groundtruth_is_group_of_list, ):
"""Update grouth truth statitistics.
1. Difficult boxes are ignored when counting the number of ground truth
@@ -579,8 +555,8 @@ def _update_ground_truth_statistics(
"""
for class_index in range(self.num_class):
num_gt_instances = np.sum(groundtruth_class_labels[
- ~groundtruth_is_difficult_list
- & ~groundtruth_is_group_of_list] == class_index)
+ ~groundtruth_is_difficult_list & ~groundtruth_is_group_of_list]
+ == class_index)
self.num_gt_instances_per_class[class_index] += num_gt_instances
if np.any(groundtruth_class_labels == class_index):
self.num_gt_imgs_per_class[class_index] += 1
@@ -603,8 +579,8 @@ def evaluate(self):
print(
'The following classes have no ground truth examples: %s',
np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
- self.label_id_offset, "self.detection_keys:",self.detection_keys
- )
+ self.label_id_offset, "self.detection_keys:",
+ self.detection_keys)
if self.use_weighted_mean_ap:
all_scores = np.array([], dtype=float)
@@ -619,26 +595,24 @@ def evaluate(self):
tp_fp_labels = np.array([], dtype=bool)
else:
scores = np.concatenate(self.scores_per_class[class_index])
- tp_fp_labels = np.concatenate(
- self.tp_fp_labels_per_class[class_index])
+ tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[
+ class_index])
if self.use_weighted_mean_ap:
all_scores = np.append(all_scores, scores)
all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
precision, recall = metrics.compute_precision_recall(
scores,
tp_fp_labels,
- self.num_gt_instances_per_class[class_index],
- )
+ self.num_gt_instances_per_class[class_index], )
self.precisions_per_class.append(precision)
self.recalls_per_class.append(recall)
- average_precision = metrics.compute_average_precision(
- precision, recall)
+ average_precision = metrics.compute_average_precision(precision,
+ recall)
self.average_precision_per_class[class_index] = average_precision
self.corloc_per_class = metrics.compute_cor_loc(
self.num_gt_imgs_per_class,
- self.num_images_correctly_detected_per_class,
- )
+ self.num_images_correctly_detected_per_class, )
if self.use_weighted_mean_ap:
num_gt_instances = np.sum(self.num_gt_instances_per_class)
@@ -654,5 +628,4 @@ def evaluate(self):
self.precisions_per_class,
self.recalls_per_class,
self.corloc_per_class,
- mean_corloc,
- )
+ mean_corloc, )
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/per_image_evaluation.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/per_image_evaluation.py
old mode 100644
new mode 100755
index 3013ae7ce..c35376210
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/per_image_evaluation.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/per_image_evaluation.py
@@ -41,17 +41,16 @@ def __init__(self, num_groundtruth_classes, matching_iou_threshold=0.5):
self.num_groundtruth_classes = num_groundtruth_classes
def compute_object_detection_metrics(
- self,
- detected_boxes,
- detected_scores,
- detected_class_labels,
- groundtruth_boxes,
- groundtruth_class_labels,
- groundtruth_is_difficult_list,
- groundtruth_is_group_of_list,
- detected_masks=None,
- groundtruth_masks=None,
- ):
+ self,
+ detected_boxes,
+ detected_scores,
+ detected_class_labels,
+ groundtruth_boxes,
+ groundtruth_class_labels,
+ groundtruth_is_difficult_list,
+ groundtruth_is_group_of_list,
+ detected_masks=None,
+ groundtruth_masks=None, ):
"""Evaluates detections as being tp, fp or ignored from a single image.
The evaluation is done in two stages:
@@ -97,13 +96,11 @@ def compute_object_detection_metrics(
detected_boxes,
detected_scores,
detected_class_labels,
- detected_masks,
- ) = self._remove_invalid_boxes(
- detected_boxes,
- detected_scores,
- detected_class_labels,
- detected_masks,
- )
+ detected_masks, ) = self._remove_invalid_boxes(
+ detected_boxes,
+ detected_scores,
+ detected_class_labels,
+ detected_masks, )
scores, tp_fp_labels = self._compute_tp_fp(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
@@ -113,23 +110,21 @@ def compute_object_detection_metrics(
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
- groundtruth_masks=groundtruth_masks,
- )
+ groundtruth_masks=groundtruth_masks, )
return scores, tp_fp_labels
def _compute_tp_fp(
- self,
- detected_boxes,
- detected_scores,
- detected_class_labels,
- groundtruth_boxes,
- groundtruth_class_labels,
- groundtruth_is_difficult_list,
- groundtruth_is_group_of_list,
- detected_masks=None,
- groundtruth_masks=None,
- ):
+ self,
+ detected_boxes,
+ detected_scores,
+ detected_class_labels,
+ groundtruth_boxes,
+ groundtruth_class_labels,
+ groundtruth_is_difficult_list,
+ groundtruth_is_group_of_list,
+ detected_masks=None,
+ groundtruth_masks=None, ):
"""Labels true/false positives of detections of an image across all
classes.
@@ -188,13 +183,10 @@ def _compute_tp_fp(
gt_masks_at_ith_class,
detected_boxes_at_ith_class,
detected_scores_at_ith_class,
- detected_masks_at_ith_class,
- ) = self._get_ith_class_arrays(detected_boxes, detected_scores,
- detected_masks,
- detected_class_labels,
- groundtruth_boxes,
- groundtruth_masks,
- groundtruth_class_labels, i)
+ detected_masks_at_ith_class, ) = self._get_ith_class_arrays(
+ detected_boxes, detected_scores, detected_masks,
+ detected_class_labels, groundtruth_boxes,
+ groundtruth_masks, groundtruth_class_labels, i)
scores, tp_fp_labels = self._compute_tp_fp_for_single_class(
detected_boxes=detected_boxes_at_ith_class,
detected_scores=detected_scores_at_ith_class,
@@ -204,19 +196,17 @@ def _compute_tp_fp(
groundtruth_is_group_of_list=(
groundtruth_is_group_of_list_at_ith_class),
detected_masks=detected_masks_at_ith_class,
- groundtruth_masks=gt_masks_at_ith_class,
- )
+ groundtruth_masks=gt_masks_at_ith_class, )
result_scores.append(scores)
result_tp_fp_labels.append(tp_fp_labels)
return result_scores, result_tp_fp_labels
def _get_overlaps_and_scores_box_mode(
- self,
- detected_boxes,
- detected_scores,
- groundtruth_boxes,
- groundtruth_is_group_of_list,
- ):
+ self,
+ detected_boxes,
+ detected_scores,
+ groundtruth_boxes,
+ groundtruth_is_group_of_list, ):
"""Computes overlaps and scores between detected and groudntruth boxes.
Args:
@@ -243,8 +233,8 @@ def _get_overlaps_and_scores_box_mode(
"""
detected_boxlist = np_box_list.BoxList(detected_boxes)
detected_boxlist.add_field('scores', detected_scores)
- gt_non_group_of_boxlist = np_box_list.BoxList(
- groundtruth_boxes[~groundtruth_is_group_of_list])
+ gt_non_group_of_boxlist = np_box_list.BoxList(groundtruth_boxes[
+ ~groundtruth_is_group_of_list])
iou = np_box_ops.iou(detected_boxlist.get(),
gt_non_group_of_boxlist.get())
@@ -253,15 +243,14 @@ def _get_overlaps_and_scores_box_mode(
return iou, None, scores, num_boxes
def _compute_tp_fp_for_single_class(
- self,
- detected_boxes,
- detected_scores,
- groundtruth_boxes,
- groundtruth_is_difficult_list,
- groundtruth_is_group_of_list,
- detected_masks=None,
- groundtruth_masks=None,
- ):
+ self,
+ detected_boxes,
+ detected_scores,
+ groundtruth_boxes,
+ groundtruth_is_difficult_list,
+ groundtruth_is_group_of_list,
+ detected_masks=None,
+ groundtruth_masks=None, ):
"""Labels boxes detected with the same class from the same image as
tp/fp.
@@ -302,13 +291,11 @@ def _compute_tp_fp_for_single_class(
iou,
_,
scores,
- num_detected_boxes,
- ) = self._get_overlaps_and_scores_box_mode(
- detected_boxes=detected_boxes,
- detected_scores=detected_scores,
- groundtruth_boxes=groundtruth_boxes,
- groundtruth_is_group_of_list=groundtruth_is_group_of_list,
- )
+ num_detected_boxes, ) = self._get_overlaps_and_scores_box_mode(
+ detected_boxes=detected_boxes,
+ detected_scores=detected_scores,
+ groundtruth_boxes=groundtruth_boxes,
+ groundtruth_is_group_of_list=groundtruth_is_group_of_list, )
if groundtruth_boxes.size == 0:
return scores, np.zeros(num_detected_boxes, dtype=bool)
@@ -342,21 +329,19 @@ def _compute_tp_fp_for_single_class(
return (
scores[~is_matched_to_difficult_box & ~is_matched_to_group_of_box],
- tp_fp_labels[~is_matched_to_difficult_box
- & ~is_matched_to_group_of_box],
- )
+ tp_fp_labels[~is_matched_to_difficult_box &
+ ~is_matched_to_group_of_box], )
def _get_ith_class_arrays(
- self,
- detected_boxes,
- detected_scores,
- detected_masks,
- detected_class_labels,
- groundtruth_boxes,
- groundtruth_masks,
- groundtruth_class_labels,
- class_index,
- ):
+ self,
+ detected_boxes,
+ detected_scores,
+ detected_masks,
+ detected_class_labels,
+ groundtruth_boxes,
+ groundtruth_masks,
+ groundtruth_class_labels,
+ class_index, ):
"""Returns numpy arrays belonging to class with index `class_index`.
Args:
@@ -401,16 +386,14 @@ class labels.
gt_masks_at_ith_class,
detected_boxes_at_ith_class,
detected_scores_at_ith_class,
- detected_masks_at_ith_class,
- )
+ detected_masks_at_ith_class, )
def _remove_invalid_boxes(
- self,
- detected_boxes,
- detected_scores,
- detected_class_labels,
- detected_masks=None,
- ):
+ self,
+ detected_boxes,
+ detected_scores,
+ detected_class_labels,
+ detected_masks=None, ):
"""Removes entries with invalid boxes.
A box is invalid if either its xmax is smaller than its xmin, or its
@@ -437,8 +420,7 @@ def _remove_invalid_boxes(
"""
valid_indices = np.logical_and(
detected_boxes[:, 0] < detected_boxes[:, 2],
- detected_boxes[:, 1] < detected_boxes[:, 3],
- )
+ detected_boxes[:, 1] < detected_boxes[:, 3], )
detected_boxes = detected_boxes[valid_indices]
detected_scores = detected_scores[valid_indices]
detected_class_labels = detected_class_labels[valid_indices]
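
A note on the validity filter in _remove_invalid_boxes above: a detection survives only if its max coordinates strictly exceed its min coordinates. A minimal NumPy sketch of the same check, assuming the [ymin, xmin, ymax, xmax] coordinate order implied by the column comparisons (0 < 2, 1 < 3):

    import numpy as np

    def remove_invalid_boxes(boxes, scores):
        # Keep rows where min coordinates are strictly below max coordinates,
        # mirroring the np.logical_and filter in _remove_invalid_boxes.
        valid = np.logical_and(boxes[:, 0] < boxes[:, 2],
                               boxes[:, 1] < boxes[:, 3])
        return boxes[valid], scores[valid]

    boxes = np.array([[0.1, 0.1, 0.5, 0.5],   # valid
                      [0.4, 0.2, 0.2, 0.6]])  # ymax < ymin, dropped
    scores = np.array([0.9, 0.8])
    print(remove_invalid_boxes(boxes, scores))
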
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/standard_fields.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_evaluation/standard_fields.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_metric.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_metric.py
old mode 100644
new mode 100755
index 4ee21bdb6..fd288cc08
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_metric.py
@@ -29,7 +29,6 @@
@METRIC.register
class AVAMetric(BaseMetric):
-
def __init__(self,
data_size,
batch_size,
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_utils.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_utils.py
old mode 100644
new mode 100755
index c6acf6b39..475198179
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/ava_utils.py
@@ -44,7 +44,7 @@ def det2csv(info, dataset_len, results, custom_classes):
for bbox in result[label]:
if type(bbox) == paddle.Tensor:
bbox = bbox.numpy()
-
+
bbox_ = tuple(bbox.tolist())
if custom_classes is not None:
actual_label = custom_classes[label + 1]
@@ -52,8 +52,7 @@ def det2csv(info, dataset_len, results, custom_classes):
actual_label = label + 1
csv_results.append((
video_id,
- timestamp,
- ) + bbox_[:4] + (actual_label, ) + bbox_[4:])
+ timestamp, ) + bbox_[:4] + (actual_label, ) + bbox_[4:])
return csv_results
@@ -222,23 +221,25 @@ def ava_eval(result_file,
if result_type == 'proposal':
gts = [
- np.array(gt_boxes[image_key], dtype=float) for image_key in gt_boxes
+ np.array(
+ gt_boxes[image_key], dtype=float) for image_key in gt_boxes
]
proposals = []
for image_key in gt_boxes:
if image_key in boxes:
proposals.append(
np.concatenate(
- (np.array(boxes[image_key], dtype=float),
- np.array(scores[image_key], dtype=float)[:, None]),
+ (np.array(
+ boxes[image_key], dtype=float), np.array(
+ scores[image_key], dtype=float)[:, None]),
axis=1))
else:
# if no corresponding proposal, add a fake one
proposals.append(np.array([0, 0, 1, 1, 1]))
# Proposals used here are with scores
- recalls = eval_recalls(gts, proposals, np.array(max_dets),
- np.arange(0.5, 0.96, 0.05))
+ recalls = eval_recalls(gts, proposals,
+ np.array(max_dets), np.arange(0.5, 0.96, 0.05))
ar = recalls.mean(axis=1)
ret = {}
for i, num in enumerate(max_dets):
@@ -254,18 +255,20 @@ def ava_eval(result_file,
start = time.time()
for image_key in gt_boxes:
if verbose and image_key in excluded_keys:
- logging.info(
- 'Found excluded timestamp in detections: %s.'
- 'It will be ignored.', image_key)
+            logging.info('Found excluded timestamp in ground truth: %s. '
+                         'It will be ignored.', image_key)
continue
pascal_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
- np.array(gt_boxes[image_key], dtype=float),
+ np.array(
+ gt_boxes[image_key], dtype=float),
standard_fields.InputDataFields.groundtruth_classes:
- np.array(gt_labels[image_key], dtype=int),
+ np.array(
+ gt_labels[image_key], dtype=int),
standard_fields.InputDataFields.groundtruth_difficult:
- np.zeros(len(gt_boxes[image_key]), dtype=bool)
+ np.zeros(
+ len(gt_boxes[image_key]), dtype=bool)
})
if verbose:
print_time('Convert groundtruth', start)
@@ -273,18 +276,20 @@ def ava_eval(result_file,
start = time.time()
for image_key in boxes:
if verbose and image_key in excluded_keys:
- logging.info(
- 'Found excluded timestamp in detections: %s.'
- 'It will be ignored.', image_key)
+            logging.info('Found excluded timestamp in detections: %s. '
+                         'It will be ignored.', image_key)
continue
pascal_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
- np.array(boxes[image_key], dtype=float),
+ np.array(
+ boxes[image_key], dtype=float),
standard_fields.DetectionResultFields.detection_classes:
- np.array(labels[image_key], dtype=int),
+ np.array(
+ labels[image_key], dtype=int),
standard_fields.DetectionResultFields.detection_scores:
- np.array(scores[image_key], dtype=float)
+ np.array(
+ scores[image_key], dtype=float)
})
if verbose:
print_time('convert detections', start)
@@ -372,8 +377,8 @@ def collect_results_cpu(result_part, size):
return ordered_results
-def ava_evaluate_results(info, dataset_len, results, custom_classes, label_file,
- file_path, exclude_file):
+def ava_evaluate_results(info, dataset_len, results, custom_classes,
+ label_file, file_path, exclude_file):
# need to create a temp result file
time_now = datetime.now().strftime('%Y%m%d_%H%M%S')
temp_file = f'AVA_{time_now}_result.csv'
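
For context on the proposal branch of ava_eval above: recall is evaluated at IoU thresholds 0.5 to 0.95 in steps of 0.05 and then averaged (ar = recalls.mean(axis=1)). A simplified single-image sketch, assuming at least one ground truth and one proposal, and skipping the greedy matching that eval_recalls performs per proposal budget:

    import numpy as np

    def average_recall(ious, thresholds=np.arange(0.5, 0.96, 0.05)):
        # ious: (num_gts, num_proposals) IoU matrix for one image.
        # A gt counts as recalled at threshold t if its best proposal IoU >= t.
        best = ious.max(axis=1)
        return float(np.mean([(best >= t).mean() for t in thresholds]))

    ious = np.array([[0.92, 0.30],
                     [0.55, 0.20]])
    print(average_recall(ious))
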
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/base.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/bmn_metric.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/bmn_metric.py
old mode 100644
new mode 100755
index cc36283f9..5eae1658c
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/bmn_metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/bmn_metric.py
@@ -68,15 +68,16 @@ def soft_nms(df, alpha, t1, t2):
while len(tscore) > 1 and len(rscore) < 101:
max_index = tscore.index(max(tscore))
- tmp_iou_list = iou_with_anchors(np.array(tstart), np.array(tend),
- tstart[max_index], tend[max_index])
+ tmp_iou_list = iou_with_anchors(
+ np.array(tstart),
+ np.array(tend), tstart[max_index], tend[max_index])
for idx in range(0, len(tscore)):
if idx != max_index:
tmp_iou = tmp_iou_list[idx]
tmp_width = tend[max_index] - tstart[max_index]
if tmp_iou > t1 + (t2 - t1) * tmp_width:
- tscore[idx] = tscore[idx] * np.exp(
- -np.square(tmp_iou) / alpha)
+ tscore[idx] = tscore[idx] * np.exp(-np.square(tmp_iou) /
+ alpha)
rstart.append(tstart[max_index])
rend.append(tend[max_index])
@@ -187,8 +188,8 @@ def update(self, batch_id, data, outputs):
score_vector_list = np.stack(score_vector_list)
video_df = pd.DataFrame(score_vector_list, columns=cols)
- video_df.to_csv(os.path.join(self.output_path, "%s.csv" % video_name),
- index=False)
+ video_df.to_csv(
+ os.path.join(self.output_path, "%s.csv" % video_name), index=False)
if batch_id % self.log_interval == 0:
logger.info("Processing................ batch {}".format(batch_id))
@@ -198,8 +199,8 @@ def accumulate(self):
"""
# check clip index of each video
#Stage1
- self.bmn_post_processing(self.video_dict, self.subset, self.output_path,
- self.result_path)
+ self.bmn_post_processing(self.video_dict, self.subset,
+ self.output_path, self.result_path)
if self.get_metrics:
logger.info("[TEST] calculate metrics...")
#Stage2
@@ -216,7 +217,8 @@ def accumulate(self):
100 * np.mean(uniform_recall_valid[:, 9]),
100 * np.mean(uniform_recall_valid[:, -1])))
- def bmn_post_processing(self, video_dict, subset, output_path, result_path):
+ def bmn_post_processing(self, video_dict, subset, output_path,
+ result_path):
video_list = list(video_dict.keys())
global result_dict
result_dict = mp.Manager().dict()
@@ -228,15 +230,15 @@ def bmn_post_processing(self, video_dict, subset, output_path, result_path):
for tid in range(pp_num - 1):
tmp_video_list = video_list[tid * num_videos_per_thread:(tid + 1) *
num_videos_per_thread]
- p = mp.Process(target=self.video_process,
- args=(tmp_video_list, video_dict, output_path,
- result_dict))
+ p = mp.Process(
+ target=self.video_process,
+ args=(tmp_video_list, video_dict, output_path, result_dict))
p.start()
processes.append(p)
tmp_video_list = video_list[(pp_num - 1) * num_videos_per_thread:]
- p = mp.Process(target=self.video_process,
- args=(tmp_video_list, video_dict, output_path,
- result_dict))
+ p = mp.Process(
+ target=self.video_process,
+ args=(tmp_video_list, video_dict, output_path, result_dict))
p.start()
processes.append(p)
for p in processes:
@@ -279,7 +281,8 @@ def video_process(self,
min(1,df.xmax.values[idx])*video_duration]}
proposal_list.append(tmp_prop)
- video_name = video_name[2:] if video_name[:2] == 'v_' else video_name
+ video_name = video_name[
+ 2:] if video_name[:2] == 'v_' else video_name
result_dict[video_name] = proposal_list
def cal_metrics(self,
@@ -289,13 +292,14 @@ def cal_metrics(self,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
subset='validation'):
- anet_proposal = ANETproposal(ground_truth_filename,
- proposal_filename,
- tiou_thresholds=tiou_thresholds,
- max_avg_nr_proposals=max_avg_nr_proposals,
- subset=subset,
- verbose=True,
- check_status=False)
+ anet_proposal = ANETproposal(
+ ground_truth_filename,
+ proposal_filename,
+ tiou_thresholds=tiou_thresholds,
+ max_avg_nr_proposals=max_avg_nr_proposals,
+ subset=subset,
+ verbose=True,
+ check_status=False)
anet_proposal.evaluate()
recall = anet_proposal.recall
average_recall = anet_proposal.avg_recall
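
The core of soft_nms above is score decay rather than hard suppression: after picking the best-scoring segment, the remaining temporal proposals are down-weighted by exp(-IoU^2 / alpha). A minimal sketch of that idea; it omits the width-dependent t1/t2 gate the original checks before decaying, and iou_1d is a hypothetical stand-in for iou_with_anchors:

    import numpy as np

    def iou_1d(starts, ends, s, e):
        # Temporal IoU between segments (starts, ends) and one anchor (s, e).
        inter = np.maximum(0.0, np.minimum(ends, e) - np.maximum(starts, s))
        union = (ends - starts) + (e - s) - inter
        return inter / np.maximum(union, 1e-8)

    def soft_nms_gaussian(starts, ends, scores, alpha=0.4, top_k=100):
        starts, ends, scores = list(starts), list(ends), list(scores)
        keep = []
        while scores and len(keep) < top_k:
            i = int(np.argmax(scores))
            s, e = starts.pop(i), ends.pop(i)
            keep.append((s, e, scores.pop(i)))
            if scores:  # Gaussian decay of the remaining scores
                ious = iou_1d(np.array(starts), np.array(ends), s, e)
                scores = list(np.array(scores) * np.exp(-np.square(ious) / alpha))
        return keep

    print(soft_nms_gaussian([0.0, 0.05, 0.6], [0.5, 0.55, 0.9], [0.9, 0.85, 0.7]))
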
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/build.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/build.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/center_crop_metric.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/center_crop_metric.py
old mode 100644
new mode 100755
index b3bfb1b23..6364ec200
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/center_crop_metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/center_crop_metric.py
@@ -48,8 +48,8 @@ def update(self, batch_id, data, outputs):
# preds ensemble
if batch_id % self.log_interval == 0:
logger.info("[TEST] Processing batch {}/{} ...".format(
- batch_id,
- self.data_size // (self.batch_size * self.world_size)))
+ batch_id, self.data_size // (self.batch_size * self.world_size
+ )))
def accumulate(self):
"""accumulate metrics when finished all iters.
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/center_crop_metric_MRI.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/center_crop_metric_MRI.py
old mode 100644
new mode 100755
index 843a9c36a..775115497
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/center_crop_metric_MRI.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/center_crop_metric_MRI.py
@@ -52,8 +52,8 @@ def update(self, batch_id, data, outputs):
# preds ensemble
if batch_id % self.log_interval == 0:
logger.info("[TEST] Processing batch {}/{} ...".format(
- batch_id,
- self.data_size // (self.batch_size * self.world_size)))
+ batch_id, self.data_size // (self.batch_size * self.world_size
+ )))
def accumulate(self):
"""accumulate metrics when finished all iters.
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/depth_metric.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/depth_metric.py
old mode 100644
new mode 100755
index c160e16ba..70e09ed30
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/depth_metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/depth_metric.py
@@ -60,18 +60,18 @@ def update(self, batch_id, data, outputs):
self.a3.append(a3)
if batch_id % self.log_interval == 0:
logger.info("[TEST] Processing batch {}/{} ...".format(
- batch_id,
- self.data_size // (self.batch_size * self.world_size)))
+ batch_id, self.data_size // (self.batch_size * self.world_size
+ )))
def accumulate(self):
"""accumulate metrics when finished all iters.
"""
logger.info(
'[TEST] finished, abs_rel= {}, sq_rel= {} , rmse= {}, rmse_log= {},'
- 'a1= {}, a2= {}, a3= {}'.format(np.mean(np.array(self.abs_rel)),
- np.mean(np.array(self.sq_rel)),
- np.mean(np.array(self.rmse)),
- np.mean(np.array(self.rmse_log)),
- np.mean(np.array(self.a1)),
- np.mean(np.array(self.a2)),
- np.mean(np.array(self.a3))))
+ 'a1= {}, a2= {}, a3= {}'.format(
+ np.mean(np.array(self.abs_rel)),
+ np.mean(np.array(self.sq_rel)),
+ np.mean(np.array(self.rmse)),
+ np.mean(np.array(self.rmse_log)),
+ np.mean(np.array(self.a1)),
+ np.mean(np.array(self.a2)), np.mean(np.array(self.a3))))
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/msrvtt_metric.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/msrvtt_metric.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/multi_crop_metric.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/multi_crop_metric.py
old mode 100644
new mode 100755
index 2cad6679d..ca4b2a11b
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/multi_crop_metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/multi_crop_metric.py
@@ -81,8 +81,8 @@ def update(self, batch_id, data, outputs):
self.video_labels[vid_id] = labels[ind]
if batch_id % self.log_interval == 0:
logger.info("[TEST] Processing batch {}/{} ...".format(
- batch_id,
- self.data_size // (self.batch_size * self.world_size)))
+ batch_id, self.data_size // (self.batch_size * self.world_size
+ )))
def accumulate(self):
"""accumulate metrics when finished all iters.
@@ -90,19 +90,17 @@ def accumulate(self):
# check clip index of each video
for key in self.clip_count.keys():
if len(self.clip_count[key]) != self.num_clips or sum(
- self.clip_count[key]) != self.num_clips * (self.num_clips -
- 1) / 2:
+ self.clip_count[key]) != self.num_clips * (
+ self.num_clips - 1) / 2:
logger.info(
"[TEST] Count Error!! video [{}] clip count [{}] not match number clips {}"
.format(key, self.clip_count[key], self.num_clips))
video_preds = paddle.to_tensor(self.video_preds)
video_labels = paddle.to_tensor(self.video_labels)
- acc_top1 = paddle.metric.accuracy(input=video_preds,
- label=video_labels,
- k=1)
- acc_top5 = paddle.metric.accuracy(input=video_preds,
- label=video_labels,
- k=5)
+ acc_top1 = paddle.metric.accuracy(
+ input=video_preds, label=video_labels, k=1)
+ acc_top5 = paddle.metric.accuracy(
+ input=video_preds, label=video_labels, k=5)
logger.info('[TEST] finished, avg_acc1= {}, avg_acc5= {} '.format(
acc_top1.numpy(), acc_top5.numpy()))
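
The consistency check in multi_crop_metric above leans on a small arithmetic fact: if every clip index 0..num_clips-1 of a video is seen exactly once, the stored indices must both count to num_clips and sum to num_clips*(num_clips-1)/2. A tiny illustration:

    num_clips = 10
    indices = list(range(num_clips))  # one entry per test clip of one video
    assert len(indices) == num_clips
    assert sum(indices) == num_clips * (num_clips - 1) // 2
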
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/recall.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/recall.py
old mode 100644
new mode 100755
index 3612e2244..0acfd1cde
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/recall.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/recall.py
@@ -1,5 +1,6 @@
import numpy as np
-import paddle
+import paddle
+
def _recalls(all_ious, proposal_nums, thrs):
@@ -75,8 +76,7 @@ def eval_recalls(gts, proposals, proposal_nums=None, iou_thrs=None):
ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
else:
ious = bbox_overlaps(
- torch.tensor(gts[i]),
- torch.tensor(img_proposal[:prop_num, :4]))
+ torch.tensor(gts[i]), torch.tensor(img_proposal[:prop_num, :4]))
ious = ious.data.numpy()
all_ious.append(ious)
all_ious = np.array(all_ious)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/registry.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/registry.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/skeleton_metric.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/skeleton_metric.py
old mode 100644
new mode 100755
index edd5d4dc2..98f53d470
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/skeleton_metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/skeleton_metric.py
@@ -31,6 +31,7 @@ class SkeletonMetric(BaseMetric):
Args:
out_file: str, file to save test results.
"""
+
def __init__(self,
data_size,
batch_size,
@@ -66,8 +67,8 @@ def update(self, batch_id, data, outputs):
# preds ensemble
if batch_id % self.log_interval == 0:
logger.info("[TEST] Processing batch {}/{} ...".format(
- batch_id,
- self.data_size // (self.batch_size * self.world_size)))
+ batch_id, self.data_size // (self.batch_size * self.world_size
+ )))
def accumulate(self):
"""accumulate metrics when finished all iters.
@@ -79,8 +80,7 @@ def accumulate(self):
headers = ['sample_index', 'predict_category']
with open(
self.out_file,
- 'w',
- ) as fp:
+ 'w', ) as fp:
writer = csv.writer(fp)
writer.writerow(headers)
writer.writerows(self.values)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/transnetv2_metric.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/transnetv2_metric.py
old mode 100644
new mode 100755
index 337088176..7a62414d1
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/transnetv2_metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/transnetv2_metric.py
@@ -67,8 +67,10 @@ def evaluate_scenes(gt_scenes, pred_scenes, n_frames_miss_tolerance=2):
"""
shift = n_frames_miss_tolerance / 2
- gt_scenes = gt_scenes.astype(np.float32) + np.array([[-0.5 + shift, 0.5 - shift]])
- pred_scenes = pred_scenes.astype(np.float32) + np.array([[-0.5 + shift, 0.5 - shift]])
+ gt_scenes = gt_scenes.astype(np.float32) + np.array(
+ [[-0.5 + shift, 0.5 - shift]])
+ pred_scenes = pred_scenes.astype(np.float32) + np.array(
+ [[-0.5 + shift, 0.5 - shift]])
gt_trans = np.stack([gt_scenes[:-1, 1], gt_scenes[1:, 0]], 1)
pred_trans = np.stack([pred_scenes[:-1, 1], pred_scenes[1:, 0]], 1)
@@ -110,19 +112,18 @@ def evaluate_scenes(gt_scenes, pred_scenes, n_frames_miss_tolerance=2):
def create_scene_based_summaries(one_hot_pred, one_hot_gt):
- thresholds = np.array([
- 0.02, 0.06, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9
- ])
+ thresholds = np.array(
+ [0.02, 0.06, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
precision, recall, f1, tp, fp, fn = np.zeros_like(thresholds), np.zeros_like(thresholds),\
np.zeros_like(thresholds), np.zeros_like(thresholds),\
np.zeros_like(thresholds), np.zeros_like(thresholds)
gt_scenes = predictions_to_scenes(one_hot_gt)
for i in range(len(thresholds)):
- pred_scenes = predictions_to_scenes(
- (one_hot_pred > thresholds[i]).astype(np.uint8)
- )
- precision[i], recall[i], f1[i], (tp[i], fp[i], fn[i]) = evaluate_scenes(gt_scenes, pred_scenes)
+ pred_scenes = predictions_to_scenes((one_hot_pred > thresholds[i]
+ ).astype(np.uint8))
+ precision[i], recall[i], f1[i], (
+ tp[i], fp[i], fn[i]) = evaluate_scenes(gt_scenes, pred_scenes)
best_idx = np.argmax(f1)
@@ -152,13 +153,14 @@ def update(self, batch_id, data, one_hot):
# preds ensemble
if batch_id % self.log_interval == 0:
logger.info("[TEST] Processing batch {}/{} ...".format(
- batch_id,
- self.data_size // (self.batch_size * self.world_size)))
+ batch_id, self.data_size // (self.batch_size * self.world_size
+ )))
def compute(self, gt_scenes):
predictions = np.concatenate(self.predictions, 0)[:len(frames)]
_, _, _, (tp, fp, fn), fp_mistakes, fn_mistakes = evaluate_scenes(
- gt_scenes, predictions_to_scenes((predictions >= args.thr).astype(np.uint8)))
+ gt_scenes,
+ predictions_to_scenes((predictions >= args.thr).astype(np.uint8)))
self.total_stats["tp"] += tp
self.total_stats["fp"] += fp
@@ -167,8 +169,11 @@ def compute(self, gt_scenes):
def accumulate(self):
"""accumulate metrics when finished all iters.
"""
- p = self.total_stats["tp"] / (self.total_stats["tp"] + self.total_stats["fp"])
- r = self.total_stats["tp"] / (self.total_stats["tp"] + self.total_stats["fn"])
+ p = self.total_stats["tp"] / (
+ self.total_stats["tp"] + self.total_stats["fp"])
+ r = self.total_stats["tp"] / (
+ self.total_stats["tp"] + self.total_stats["fn"])
f1 = (p * r * 2) / (p + r)
- logger.info('[TEST] finished, Precision= {:5.2f}, Recall= {:5.2f} , F1 Score= {:5.2f} '.format(
- p * 100, r * 100, f1 * 100))
\ No newline at end of file
+ logger.info(
+ '[TEST] finished, Precision= {:5.2f}, Recall= {:5.2f} , F1 Score= {:5.2f} '.
+ format(p * 100, r * 100, f1 * 100))
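
The accumulate step of the TransNetV2 metric above is plain counts-to-scores arithmetic: p = tp/(tp+fp), r = tp/(tp+fn), f1 = 2pr/(p+r). A self-contained restatement with hypothetical counts:

    def precision_recall_f1(tp, fp, fn):
        p = tp / (tp + fp)
        r = tp / (tp + fn)
        return p, r, 2 * p * r / (p + r)

    p, r, f1 = precision_recall_f1(tp=90, fp=10, fn=30)
    print('Precision= {:5.2f}, Recall= {:5.2f}, F1 Score= {:5.2f}'.format(
        p * 100, r * 100, f1 * 100))
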
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/vos_metric.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/vos_metric.py
old mode 100644
new mode 100755
index 19762f686..a027f5c5a
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/vos_metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/vos_metric.py
@@ -52,13 +52,14 @@ def update(self, batch_id, data, model):
seq_dataset = data
seq_name = seq_dataset.seq_name
- logger.info('Prcessing Seq {} [{}/{}]:'.format(seq_name, self.video_num,
- self.total_video_num))
- seq_dataloader = DataLoader(seq_dataset,
- return_list=True,
- batch_size=1,
- shuffle=False,
- num_workers=0)
+        logger.info('Processing Seq {} [{}/{}]:'.format(
+ seq_name, self.video_num, self.total_video_num))
+ seq_dataloader = DataLoader(
+ seq_dataset,
+ return_list=True,
+ batch_size=1,
+ shuffle=False,
+ num_workers=0)
seq_total_time = 0
seq_total_frame = 0
ref_embeddings = []
@@ -102,13 +103,13 @@ def update(self, batch_id, data, model):
[ori_height, ori_width], obj_num
]
- all_pred, current_embedding = model(data_batch, mode='test')
+ all_pred, current_embedding = model(
+ data_batch, mode='test')
if frame_idx == 0:
if current_label is None:
- logger.info(
- "No first frame label in Seq {}.".format(
- seq_name))
+ logger.info("No first frame label in Seq {}.".
+ format(seq_name))
ref_embeddings[aug_idx].append(current_embedding)
ref_masks[aug_idx].append(current_label)
@@ -121,8 +122,8 @@ def update(self, batch_id, data, model):
# have to introduce new labels for new objects, if necessary.
if not sample['meta']['flip'] and not (
current_label is None) and join_label is None:
- join_label = paddle.cast(current_label,
- dtype='int64')
+ join_label = paddle.cast(
+ current_label, dtype='int64')
all_preds.append(all_pred)
if current_label is not None:
ref_embeddings[aug_idx].append(current_embedding)
@@ -134,11 +135,12 @@ def update(self, batch_id, data, model):
all_preds, axis=0) #average results if augmentation
pred_label = paddle.argmax(all_preds, axis=0)
if join_label is not None:
- join_label = paddle.squeeze(paddle.squeeze(join_label,
- axis=0),
- axis=0)
+ join_label = paddle.squeeze(
+ paddle.squeeze(
+ join_label, axis=0), axis=0)
keep = paddle.cast((join_label == 0), dtype="int64")
- pred_label = pred_label * keep + join_label * (1 - keep)
+ pred_label = pred_label * keep + join_label * (1 - keep
+ )
pred_label = pred_label
current_label = paddle.reshape(
pred_label, shape=[1, 1, ori_height, ori_width])
@@ -171,8 +173,8 @@ def update(self, batch_id, data, model):
else:
one_frametime = time.time() - time_start
seq_total_time += one_frametime
- logger.info('Ref Frame: {}, Time: {}'.format(
- imgname[0], one_frametime))
+ logger.info('Ref Frame: {}, Time: {}'.format(imgname[
+ 0], one_frametime))
del (ref_embeddings)
del (ref_masks)
@@ -188,12 +190,12 @@ def update(self, batch_id, data, model):
self.total_sfps += seq_avg_time_per_frame
avg_sfps = self.total_sfps / (batch_id + 1)
logger.info("Seq {} FPS: {}, Total FPS: {}, FPS per Seq: {}".format(
- seq_name, 1. / seq_avg_time_per_frame,
- 1. / total_avg_time_per_frame, 1. / avg_sfps))
+ seq_name, 1. / seq_avg_time_per_frame, 1. /
+ total_avg_time_per_frame, 1. / avg_sfps))
def flip_tensor(self, tensor, dim=0):
- inv_idx = paddle.cast(paddle.arange(tensor.shape[dim] - 1, -1, -1),
- dtype="int64")
+ inv_idx = paddle.cast(
+ paddle.arange(tensor.shape[dim] - 1, -1, -1), dtype="int64")
tensor = paddle.index_select(x=tensor, index=inv_idx, axis=dim)
return tensor
@@ -201,58 +203,58 @@ def save_mask(self, mask_tensor, path):
_palette = [
0, 0, 0, 128, 0, 0, 0, 128, 0, 128, 128, 0, 0, 0, 128, 128, 0, 128,
0, 128, 128, 128, 128, 128, 64, 0, 0, 191, 0, 0, 64, 128, 0, 191,
- 128, 0, 64, 0, 128, 191, 0, 128, 64, 128, 128, 191, 128, 128, 0, 64,
- 0, 128, 64, 0, 0, 191, 0, 128, 191, 0, 0, 64, 128, 128, 64, 128, 22,
- 22, 22, 23, 23, 23, 24, 24, 24, 25, 25, 25, 26, 26, 26, 27, 27, 27,
- 28, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32, 32, 33, 33,
- 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38, 38, 39,
- 39, 39, 40, 40, 40, 41, 41, 41, 42, 42, 42, 43, 43, 43, 44, 44, 44,
- 45, 45, 45, 46, 46, 46, 47, 47, 47, 48, 48, 48, 49, 49, 49, 50, 50,
- 50, 51, 51, 51, 52, 52, 52, 53, 53, 53, 54, 54, 54, 55, 55, 55, 56,
- 56, 56, 57, 57, 57, 58, 58, 58, 59, 59, 59, 60, 60, 60, 61, 61, 61,
- 62, 62, 62, 63, 63, 63, 64, 64, 64, 65, 65, 65, 66, 66, 66, 67, 67,
- 67, 68, 68, 68, 69, 69, 69, 70, 70, 70, 71, 71, 71, 72, 72, 72, 73,
- 73, 73, 74, 74, 74, 75, 75, 75, 76, 76, 76, 77, 77, 77, 78, 78, 78,
- 79, 79, 79, 80, 80, 80, 81, 81, 81, 82, 82, 82, 83, 83, 83, 84, 84,
- 84, 85, 85, 85, 86, 86, 86, 87, 87, 87, 88, 88, 88, 89, 89, 89, 90,
- 90, 90, 91, 91, 91, 92, 92, 92, 93, 93, 93, 94, 94, 94, 95, 95, 95,
- 96, 96, 96, 97, 97, 97, 98, 98, 98, 99, 99, 99, 100, 100, 100, 101,
- 101, 101, 102, 102, 102, 103, 103, 103, 104, 104, 104, 105, 105,
- 105, 106, 106, 106, 107, 107, 107, 108, 108, 108, 109, 109, 109,
- 110, 110, 110, 111, 111, 111, 112, 112, 112, 113, 113, 113, 114,
- 114, 114, 115, 115, 115, 116, 116, 116, 117, 117, 117, 118, 118,
- 118, 119, 119, 119, 120, 120, 120, 121, 121, 121, 122, 122, 122,
- 123, 123, 123, 124, 124, 124, 125, 125, 125, 126, 126, 126, 127,
- 127, 127, 128, 128, 128, 129, 129, 129, 130, 130, 130, 131, 131,
- 131, 132, 132, 132, 133, 133, 133, 134, 134, 134, 135, 135, 135,
- 136, 136, 136, 137, 137, 137, 138, 138, 138, 139, 139, 139, 140,
- 140, 140, 141, 141, 141, 142, 142, 142, 143, 143, 143, 144, 144,
- 144, 145, 145, 145, 146, 146, 146, 147, 147, 147, 148, 148, 148,
- 149, 149, 149, 150, 150, 150, 151, 151, 151, 152, 152, 152, 153,
- 153, 153, 154, 154, 154, 155, 155, 155, 156, 156, 156, 157, 157,
- 157, 158, 158, 158, 159, 159, 159, 160, 160, 160, 161, 161, 161,
- 162, 162, 162, 163, 163, 163, 164, 164, 164, 165, 165, 165, 166,
- 166, 166, 167, 167, 167, 168, 168, 168, 169, 169, 169, 170, 170,
- 170, 171, 171, 171, 172, 172, 172, 173, 173, 173, 174, 174, 174,
- 175, 175, 175, 176, 176, 176, 177, 177, 177, 178, 178, 178, 179,
- 179, 179, 180, 180, 180, 181, 181, 181, 182, 182, 182, 183, 183,
- 183, 184, 184, 184, 185, 185, 185, 186, 186, 186, 187, 187, 187,
- 188, 188, 188, 189, 189, 189, 190, 190, 190, 191, 191, 191, 192,
- 192, 192, 193, 193, 193, 194, 194, 194, 195, 195, 195, 196, 196,
- 196, 197, 197, 197, 198, 198, 198, 199, 199, 199, 200, 200, 200,
- 201, 201, 201, 202, 202, 202, 203, 203, 203, 204, 204, 204, 205,
- 205, 205, 206, 206, 206, 207, 207, 207, 208, 208, 208, 209, 209,
- 209, 210, 210, 210, 211, 211, 211, 212, 212, 212, 213, 213, 213,
- 214, 214, 214, 215, 215, 215, 216, 216, 216, 217, 217, 217, 218,
- 218, 218, 219, 219, 219, 220, 220, 220, 221, 221, 221, 222, 222,
- 222, 223, 223, 223, 224, 224, 224, 225, 225, 225, 226, 226, 226,
- 227, 227, 227, 228, 228, 228, 229, 229, 229, 230, 230, 230, 231,
- 231, 231, 232, 232, 232, 233, 233, 233, 234, 234, 234, 235, 235,
- 235, 236, 236, 236, 237, 237, 237, 238, 238, 238, 239, 239, 239,
- 240, 240, 240, 241, 241, 241, 242, 242, 242, 243, 243, 243, 244,
- 244, 244, 245, 245, 245, 246, 246, 246, 247, 247, 247, 248, 248,
- 248, 249, 249, 249, 250, 250, 250, 251, 251, 251, 252, 252, 252,
- 253, 253, 253, 254, 254, 254, 255, 255, 255
+ 128, 0, 64, 0, 128, 191, 0, 128, 64, 128, 128, 191, 128, 128, 0,
+ 64, 0, 128, 64, 0, 0, 191, 0, 128, 191, 0, 0, 64, 128, 128, 64,
+ 128, 22, 22, 22, 23, 23, 23, 24, 24, 24, 25, 25, 25, 26, 26, 26,
+ 27, 27, 27, 28, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32,
+ 32, 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38,
+ 38, 38, 39, 39, 39, 40, 40, 40, 41, 41, 41, 42, 42, 42, 43, 43, 43,
+ 44, 44, 44, 45, 45, 45, 46, 46, 46, 47, 47, 47, 48, 48, 48, 49, 49,
+ 49, 50, 50, 50, 51, 51, 51, 52, 52, 52, 53, 53, 53, 54, 54, 54, 55,
+ 55, 55, 56, 56, 56, 57, 57, 57, 58, 58, 58, 59, 59, 59, 60, 60, 60,
+ 61, 61, 61, 62, 62, 62, 63, 63, 63, 64, 64, 64, 65, 65, 65, 66, 66,
+ 66, 67, 67, 67, 68, 68, 68, 69, 69, 69, 70, 70, 70, 71, 71, 71, 72,
+ 72, 72, 73, 73, 73, 74, 74, 74, 75, 75, 75, 76, 76, 76, 77, 77, 77,
+ 78, 78, 78, 79, 79, 79, 80, 80, 80, 81, 81, 81, 82, 82, 82, 83, 83,
+ 83, 84, 84, 84, 85, 85, 85, 86, 86, 86, 87, 87, 87, 88, 88, 88, 89,
+ 89, 89, 90, 90, 90, 91, 91, 91, 92, 92, 92, 93, 93, 93, 94, 94, 94,
+ 95, 95, 95, 96, 96, 96, 97, 97, 97, 98, 98, 98, 99, 99, 99, 100,
+ 100, 100, 101, 101, 101, 102, 102, 102, 103, 103, 103, 104, 104,
+ 104, 105, 105, 105, 106, 106, 106, 107, 107, 107, 108, 108, 108,
+ 109, 109, 109, 110, 110, 110, 111, 111, 111, 112, 112, 112, 113,
+ 113, 113, 114, 114, 114, 115, 115, 115, 116, 116, 116, 117, 117,
+ 117, 118, 118, 118, 119, 119, 119, 120, 120, 120, 121, 121, 121,
+ 122, 122, 122, 123, 123, 123, 124, 124, 124, 125, 125, 125, 126,
+ 126, 126, 127, 127, 127, 128, 128, 128, 129, 129, 129, 130, 130,
+ 130, 131, 131, 131, 132, 132, 132, 133, 133, 133, 134, 134, 134,
+ 135, 135, 135, 136, 136, 136, 137, 137, 137, 138, 138, 138, 139,
+ 139, 139, 140, 140, 140, 141, 141, 141, 142, 142, 142, 143, 143,
+ 143, 144, 144, 144, 145, 145, 145, 146, 146, 146, 147, 147, 147,
+ 148, 148, 148, 149, 149, 149, 150, 150, 150, 151, 151, 151, 152,
+ 152, 152, 153, 153, 153, 154, 154, 154, 155, 155, 155, 156, 156,
+ 156, 157, 157, 157, 158, 158, 158, 159, 159, 159, 160, 160, 160,
+ 161, 161, 161, 162, 162, 162, 163, 163, 163, 164, 164, 164, 165,
+ 165, 165, 166, 166, 166, 167, 167, 167, 168, 168, 168, 169, 169,
+ 169, 170, 170, 170, 171, 171, 171, 172, 172, 172, 173, 173, 173,
+ 174, 174, 174, 175, 175, 175, 176, 176, 176, 177, 177, 177, 178,
+ 178, 178, 179, 179, 179, 180, 180, 180, 181, 181, 181, 182, 182,
+ 182, 183, 183, 183, 184, 184, 184, 185, 185, 185, 186, 186, 186,
+ 187, 187, 187, 188, 188, 188, 189, 189, 189, 190, 190, 190, 191,
+ 191, 191, 192, 192, 192, 193, 193, 193, 194, 194, 194, 195, 195,
+ 195, 196, 196, 196, 197, 197, 197, 198, 198, 198, 199, 199, 199,
+ 200, 200, 200, 201, 201, 201, 202, 202, 202, 203, 203, 203, 204,
+ 204, 204, 205, 205, 205, 206, 206, 206, 207, 207, 207, 208, 208,
+ 208, 209, 209, 209, 210, 210, 210, 211, 211, 211, 212, 212, 212,
+ 213, 213, 213, 214, 214, 214, 215, 215, 215, 216, 216, 216, 217,
+ 217, 217, 218, 218, 218, 219, 219, 219, 220, 220, 220, 221, 221,
+ 221, 222, 222, 222, 223, 223, 223, 224, 224, 224, 225, 225, 225,
+ 226, 226, 226, 227, 227, 227, 228, 228, 228, 229, 229, 229, 230,
+ 230, 230, 231, 231, 231, 232, 232, 232, 233, 233, 233, 234, 234,
+ 234, 235, 235, 235, 236, 236, 236, 237, 237, 237, 238, 238, 238,
+ 239, 239, 239, 240, 240, 240, 241, 241, 241, 242, 242, 242, 243,
+ 243, 243, 244, 244, 244, 245, 245, 245, 246, 246, 246, 247, 247,
+ 247, 248, 248, 248, 249, 249, 249, 250, 250, 250, 251, 251, 251,
+ 252, 252, 252, 253, 253, 253, 254, 254, 254, 255, 255, 255
]
mask = mask_tensor.cpu().numpy().astype('uint8')
mask = Image.fromarray(mask).convert('P')
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/average_precision_calculator.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/average_precision_calculator.py
old mode 100644
new mode 100755
index bdbd6e0d0..6f064a4b9
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/average_precision_calculator.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/average_precision_calculator.py
@@ -59,6 +59,7 @@
class AveragePrecisionCalculator(object):
"""Calculate the average precision and average precision at n."""
+
def __init__(self, top_n=None):
"""Construct an AveragePrecisionCalculator to calculate average precision.
@@ -150,10 +151,11 @@ def peek_ap_at_n(self):
return 0
predlists = numpy.array(list(zip(*self._heap)))
- ap = self.ap_at_n(predlists[0],
- predlists[1],
- n=self._top_n,
- total_num_positives=self._total_positives)
+ ap = self.ap_at_n(
+ predlists[0],
+ predlists[1],
+ n=self._top_n,
+ total_num_positives=self._total_positives)
return ap
@staticmethod
@@ -215,11 +217,12 @@ def ap_at_n(predictions, actuals, n=20, total_num_positives=None):
actuals = numpy.array(actuals)
# add a shuffler to avoid overestimating the ap
- predictions, actuals = AveragePrecisionCalculator._shuffle(
- predictions, actuals)
- sortidx = sorted(range(len(predictions)),
- key=lambda k: predictions[k],
- reverse=True)
+ predictions, actuals = AveragePrecisionCalculator._shuffle(predictions,
+ actuals)
+ sortidx = sorted(
+ range(len(predictions)),
+ key=lambda k: predictions[k],
+ reverse=True)
if total_num_positives is None:
numpos = numpy.size(numpy.where(actuals > 0))
@@ -269,6 +272,6 @@ def _zero_one_normalize(predictions, epsilon=1e-7):
The normalized prediction.
"""
denominator = numpy.max(predictions) - numpy.min(predictions)
- ret = (predictions - numpy.min(predictions)) / numpy.max(
- denominator, epsilon)
+ ret = (predictions - numpy.min(predictions)) / numpy.max(denominator,
+ epsilon)
return ret
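
ap_at_n above computes non-interpolated average precision over the top-n ranked predictions. A condensed sketch of the same computation, leaving out the tie-breaking shuffle the original applies before sorting:

    import numpy as np

    def ap_at_n(predictions, actuals, n=20, total_num_positives=None):
        order = np.argsort(-np.asarray(predictions))[:n]
        numpos = total_num_positives or int(np.sum(np.asarray(actuals) > 0))
        if numpos == 0:
            return 0.0
        hits, ap = 0, 0.0
        for rank, idx in enumerate(order, start=1):
            if actuals[idx] > 0:
                hits += 1
                ap += hits / rank   # precision at this recall point
        return ap / min(numpos, n)  # each positive contributes 1/min(numpos, n)

    print(ap_at_n([0.9, 0.1, 0.8, 0.4], [1, 0, 0, 1]))  # 0.8333...
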
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/eval_util.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/eval_util.py
old mode 100644
new mode 100755
index abcf0d8f2..52aa86788
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/eval_util.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/eval_util.py
@@ -92,8 +92,8 @@ def calculate_gap(predictions, actuals, top_k=20):
gap_calculator = ap_calculator.AveragePrecisionCalculator()
sparse_predictions, sparse_labels, num_positives = top_k_by_class(
predictions, actuals, top_k)
- gap_calculator.accumulate(flatten(sparse_predictions),
- flatten(sparse_labels), sum(num_positives))
+ gap_calculator.accumulate(
+ flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))
return gap_calculator.peek_ap_at_n()
@@ -147,11 +147,8 @@ def top_k_triplets(predictions, labels, k=20):
@METRIC.register
class HitOneMetric(BaseMetric):
"""A class to store the evaluation metrics."""
- def __init__(self,
- num_class,
- top_k,
- data_size,
- batch_size,
+
+ def __init__(self, num_class, top_k, data_size, batch_size,
log_interval=20):
"""Construct an HitOneMetric object to store the evaluation metrics."""
self.hit_at_one = []
@@ -162,8 +159,9 @@ def __init__(self,
def accumulate(self):
logger.info(
'[TEST] finished, hit_at_one = {:.5f}, perr = {:.5f}, gap = {:.5f}'.
- format(np.mean(np.array(self.hit_at_one)),
- np.mean(np.array(self.perr)), np.mean(np.array(self.gap))))
+ format(
+ np.mean(np.array(self.hit_at_one)),
+ np.mean(np.array(self.perr)), np.mean(np.array(self.gap))))
def clear(self):
"""Clear the evaluation metrics and reset the HitOneMetric object."""
@@ -194,5 +192,4 @@ def update(self, batch_id, data, outputs):
if batch_id % self.log_interval == 0:
logger.info("[TEST] Processing batch {}/{}...".format(
batch_id,
- self.data_size // (self.batch_size * self.world_size),
- ))
+ self.data_size // (self.batch_size * self.world_size), ))
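
HitOneMetric above logs hit_at_one, perr, and gap. Minimal NumPy sketches of the first two, assuming the standard YouTube-8M definitions for dense predictions and 0/1 labels of shape [batch, num_classes]:

    import numpy as np

    def hit_at_one(predictions, actuals):
        # Fraction of samples whose single top-scoring class is a true label.
        predictions, actuals = np.asarray(predictions), np.asarray(actuals)
        top = predictions.argmax(axis=1)
        return float(np.mean(actuals[np.arange(len(top)), top] > 0))

    def perr(predictions, actuals):
        # Precision at equal recall rate: per sample, precision over its
        # top-k predictions, where k is that sample's number of positives.
        vals = []
        for pred, act in zip(np.asarray(predictions), np.asarray(actuals)):
            k = int(np.sum(act > 0))
            if k:
                vals.append(float(np.mean(act[np.argsort(-pred)[:k]] > 0)))
        return float(np.mean(vals))

    preds = np.array([[0.8, 0.1, 0.1], [0.2, 0.5, 0.3]])
    labels = np.array([[1, 0, 0], [0, 0, 1]])
    print(hit_at_one(preds, labels), perr(preds, labels))  # 0.5 0.5
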
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/mean_average_precision_calculator.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/mean_average_precision_calculator.py
old mode 100644
new mode 100755
index 0ae8b0ed3..fdaf6de63
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/mean_average_precision_calculator.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/metrics/youtube8m/mean_average_precision_calculator.py
@@ -97,8 +97,8 @@ def clear(self):
calculator.clear()
def is_empty(self):
- return ([calculator.heap_size for calculator in self._ap_calculators] ==
- [0 for _ in range(self._num_class)])
+ return ([calculator.heap_size for calculator in self._ap_calculators]
+ == [0 for _ in range(self._num_class)])
def peek_map_at_n(self):
"""Peek the non-interpolated mean average precision at n.
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/assigners/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/assigners/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/assigners/max_iou_assigner_ava.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/assigners/max_iou_assigner_ava.py
old mode 100644
new mode 100755
index 2515c858b..4d265cd14
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/assigners/max_iou_assigner_ava.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/assigners/max_iou_assigner_ava.py
@@ -16,6 +16,7 @@
from ..registry import BBOX_ASSIGNERS
from ..bbox_utils import bbox_overlaps
+
class AssignResult():
def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
self.num_gts = num_gts
@@ -30,13 +31,16 @@ def add_gt_(self, gt_labels):
self.gt_inds = paddle.concat([self_inds, gt_inds_squeeze])
gt_label_ones = paddle.full((len(gt_labels), ), 1, dtype='float32')
max_overlaps_squeeze = paddle.squeeze(self.max_overlaps, axis=0)
- self.max_overlaps = paddle.concat([gt_label_ones, max_overlaps_squeeze])
+ self.max_overlaps = paddle.concat(
+ [gt_label_ones, max_overlaps_squeeze])
if self.labels is not None:
self.labels = paddle.concat([gt_labels, self.labels])
+
@BBOX_ASSIGNERS.register()
class MaxIoUAssignerAVA():
"""Assign a corresponding gt bbox or background to each bbox. """
+
def __init__(self,
pos_iou_thr,
neg_iou_thr,
@@ -56,10 +60,7 @@ def __init__(self,
self.gpu_assign_thr = gpu_assign_thr
self.match_low_quality = match_low_quality
- def assign(self,
- bboxes,
- gt_bboxes,
- gt_labels=None):
+ def assign(self, bboxes, gt_bboxes, gt_labels=None):
"""Assign gt to bboxes. """
overlaps = bbox_overlaps(gt_bboxes, bboxes)
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
@@ -76,17 +77,19 @@ def assign_wrt_overlaps(self, overlaps, gt_labels=None):
max_overlaps, argmax_overlaps = paddle.topk(overlaps, k=1, axis=0)
# for each gt, which anchor best overlaps with it
# for each gt, the max iou of all proposals
- gt_max_overlaps, gt_argmax_overlaps = paddle.topk(overlaps, k=1, axis=1)
+ gt_max_overlaps, gt_argmax_overlaps = paddle.topk(
+ overlaps, k=1, axis=1)
# 2. assign negative: below the negative inds are set to be 0
match_labels = paddle.full(argmax_overlaps.shape, -1, dtype='int32')
match_labels = paddle.where(max_overlaps < self.neg_iou_thr,
- paddle.zeros_like(match_labels), match_labels)
+ paddle.zeros_like(match_labels),
+ match_labels)
# 3. assign positive: above positive IoU threshold
argmax_overlaps_int32 = paddle.cast(argmax_overlaps, 'int32')
match_labels = paddle.where(max_overlaps >= self.pos_iou_thr,
- argmax_overlaps_int32 + 1, match_labels)
+ argmax_overlaps_int32 + 1, match_labels)
assigned_gt_inds = match_labels
if self.match_low_quality:
            # Low-quality matching will overwrite the assigned_gt_inds
@@ -104,9 +107,12 @@ def assign_wrt_overlaps(self, overlaps, gt_labels=None):
equal_y_np = gt_max_overlaps[i].numpy()
max_iou_inds = np.equal(equal_x_np, equal_y_np)
max_iou_inds = paddle.to_tensor(max_iou_inds)
- max_iou_inds = paddle.reshape( max_iou_inds, [1,max_iou_inds.shape[0]] )
- match_labels_gts = paddle.full(max_iou_inds.shape, i+1, dtype='int32')
- match_labels = paddle.where(max_iou_inds, match_labels_gts, match_labels)
+ max_iou_inds = paddle.reshape(
+ max_iou_inds, [1, max_iou_inds.shape[0]])
+ match_labels_gts = paddle.full(
+ max_iou_inds.shape, i + 1, dtype='int32')
+ match_labels = paddle.where(
+ max_iou_inds, match_labels_gts, match_labels)
assigned_gt_inds = match_labels
else:
assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
@@ -114,35 +120,37 @@ def assign_wrt_overlaps(self, overlaps, gt_labels=None):
if gt_labels is not None:
# consider multi-class case (AVA)
assert len(gt_labels[0]) > 1
- assigned_labels = paddle.full([num_bboxes, len(gt_labels[0])], 0, dtype='float32')
- assigned_gt_inds_reshape = assigned_gt_inds.reshape([assigned_gt_inds.shape[1]])
- pos_inds = paddle.nonzero( assigned_gt_inds_reshape , as_tuple=False)
+ assigned_labels = paddle.full(
+ [num_bboxes, len(gt_labels[0])], 0, dtype='float32')
+ assigned_gt_inds_reshape = assigned_gt_inds.reshape(
+ [assigned_gt_inds.shape[1]])
+ pos_inds = paddle.nonzero(assigned_gt_inds_reshape, as_tuple=False)
pos_inds_num = paddle.numel(pos_inds).numpy()[0]
if pos_inds_num > 0:
- pos_inds = paddle.squeeze(pos_inds, axis = 1 )
- assigned_gt_inds_squeeze = paddle.squeeze(assigned_gt_inds, axis=0)
- assigned_gt_inds_select = paddle.index_select(assigned_gt_inds_squeeze, pos_inds) - 1
- gt_labels_select = paddle.index_select(gt_labels, assigned_gt_inds_select)
+ pos_inds = paddle.squeeze(pos_inds, axis=1)
+ assigned_gt_inds_squeeze = paddle.squeeze(
+ assigned_gt_inds, axis=0)
+ assigned_gt_inds_select = paddle.index_select(
+ assigned_gt_inds_squeeze, pos_inds) - 1
+ gt_labels_select = paddle.index_select(gt_labels,
+ assigned_gt_inds_select)
A = assigned_gt_inds_squeeze
X = assigned_gt_inds_squeeze - 1
Y = paddle.zeros_like(X)
- if A.shape[0]==1:
- if A.numpy()[0]>0:
- T=X
+ if A.shape[0] == 1:
+ if A.numpy()[0] > 0:
+ T = X
else:
- T=Y
+ T = Y
else:
- T = paddle.where(A>0, X, Y)
+ T = paddle.where(A > 0, X, Y)
S = paddle.index_select(gt_labels, T)
- AE = paddle.expand(A, [S.shape[1], A.shape[0]])
+ AE = paddle.expand(A, [S.shape[1], A.shape[0]])
AET = paddle.transpose(AE, perm=[1, 0])
- R = paddle.where(AET>0, S, assigned_labels)
+ R = paddle.where(AET > 0, S, assigned_labels)
assigned_labels = R
else:
assigned_labels = None
ret = AssignResult(
- num_gts,
- assigned_gt_inds,
- max_overlaps,
- labels=assigned_labels)
+ num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
return ret
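
The assignment rule in assign_wrt_overlaps above is easier to read without the paddle.where plumbing. A NumPy sketch of the same thresholding, without the optional low-quality-match pass (-1 = ignore, 0 = background, i+1 = matched to ground truth i; boxes whose best IoU falls between neg_iou_thr and pos_iou_thr stay ignored):

    import numpy as np

    def assign_wrt_overlaps(overlaps, pos_iou_thr, neg_iou_thr):
        # overlaps: (num_gts, num_bboxes) IoU matrix.
        max_overlaps = overlaps.max(axis=0)        # best gt IoU per bbox
        argmax_overlaps = overlaps.argmax(axis=0)  # which gt achieves it
        assigned = np.full(overlaps.shape[1], -1, dtype=np.int32)
        assigned[max_overlaps < neg_iou_thr] = 0
        pos = max_overlaps >= pos_iou_thr
        assigned[pos] = argmax_overlaps[pos] + 1
        return assigned

    overlaps = np.array([[0.8, 0.3, 0.05],
                         [0.1, 0.6, 0.20]])
    print(assign_wrt_overlaps(overlaps, 0.5, 0.4))  # [1 2 0]
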
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/actbert.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/actbert.py
old mode 100644
new mode 100755
index dbee1fd8c..f682e416b
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/actbert.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/actbert.py
@@ -32,12 +32,12 @@
class BertEmbeddings(nn.Layer):
"""Construct the embeddings from word, position and token_type embeddings.
"""
+
def __init__(self, vocab_size, max_position_embeddings, type_vocab_size,
hidden_size, hidden_dropout_prob):
super(BertEmbeddings, self).__init__()
- self.word_embeddings = nn.Embedding(vocab_size,
- hidden_size,
- padding_idx=0)
+ self.word_embeddings = nn.Embedding(
+ vocab_size, hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(max_position_embeddings,
hidden_size)
self.token_type_embeddings = nn.Embedding(type_vocab_size, hidden_size)
@@ -189,8 +189,8 @@ class BertIntermediate(nn.Layer):
def __init__(self, hidden_size, intermediate_size, hidden_act):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(hidden_size, intermediate_size)
- if isinstance(hidden_act, str) or (sys.version_info[0] == 2
- and isinstance(hidden_act, str)):
+ if isinstance(hidden_act, str) or (sys.version_info[0] == 2 and
+ isinstance(hidden_act, str)):
self.intermediate_act_fn = ACT2FN[hidden_act]
else:
self.intermediate_act_fn = hidden_act
@@ -218,19 +218,19 @@ def forward(self, hidden_states, input_tensor):
class BertEntAttention(nn.Layer):
"""Core mudule of tangled transformer.
"""
+
def __init__(
- self,
- hidden_size,
- v_hidden_size,
- a_hidden_size,
- bi_hidden_size,
- attention_probs_dropout_prob,
- v_attention_probs_dropout_prob,
- a_attention_probs_dropout_prob,
- av_attention_probs_dropout_prob,
- at_attention_probs_dropout_prob,
- bi_num_attention_heads,
- ):
+ self,
+ hidden_size,
+ v_hidden_size,
+ a_hidden_size,
+ bi_hidden_size,
+ attention_probs_dropout_prob,
+ v_attention_probs_dropout_prob,
+ a_attention_probs_dropout_prob,
+ av_attention_probs_dropout_prob,
+ at_attention_probs_dropout_prob,
+ bi_num_attention_heads, ):
super(BertEntAttention, self).__init__()
if bi_hidden_size % bi_num_attention_heads != 0:
raise ValueError(
@@ -278,14 +278,13 @@ def transpose_for_scores(self, x):
return x.transpose((0, 2, 1, 3))
def forward(
- self,
- input_tensor1,
- attention_mask1,
- input_tensor2,
- attention_mask2,
- input_tensor3,
- attention_mask3,
- ):
+ self,
+ input_tensor1,
+ attention_mask1,
+ input_tensor2,
+ attention_mask2,
+ input_tensor3,
+ attention_mask3, ):
# for vision input.
mixed_query_layer1 = self.query1(input_tensor1)
@@ -345,33 +344,33 @@ def do_attention(query_layer, key_layer, value_layer, attention_mask,
context_key_av = self.key_av(context_av).transpose((0, 2, 1))
# interpolate only support 4-D tensor now.
- context_key_av = F.interpolate(context_key_av.unsqueeze(-1),
- size=(key_layer2.shape[2],
- 1)).squeeze(-1)
+ context_key_av = F.interpolate(
+ context_key_av.unsqueeze(-1), size=(key_layer2.shape[2],
+ 1)).squeeze(-1)
context_key_av = self.transpose_for_scores(
context_key_av.transpose((0, 2, 1)))
key_layer2 = key_layer2 + context_key_av
context_key_at = self.key_at(context_at).transpose((0, 2, 1))
- context_key_at = F.interpolate(context_key_at.unsqueeze(-1),
- size=(key_layer1.shape[2],
- 1)).squeeze(-1)
+ context_key_at = F.interpolate(
+ context_key_at.unsqueeze(-1), size=(key_layer1.shape[2],
+ 1)).squeeze(-1)
context_key_at = self.transpose_for_scores(
context_key_at.transpose((0, 2, 1)))
key_layer1 = key_layer1 + context_key_at
context_val_av = self.value_at(context_av).transpose((0, 2, 1))
- context_val_av = F.interpolate(context_val_av.unsqueeze(-1),
- size=(value_layer2.shape[2],
- 1)).squeeze(-1)
+ context_val_av = F.interpolate(
+ context_val_av.unsqueeze(-1), size=(value_layer2.shape[2],
+ 1)).squeeze(-1)
context_val_av = self.transpose_for_scores(
context_val_av.transpose((0, 2, 1)))
value_layer2 = value_layer2 + context_val_av
context_val_at = self.value_at(context_at).transpose((0, 2, 1))
- context_val_at = F.interpolate(context_val_at.unsqueeze(-1),
- size=(value_layer1.shape[2],
- 1)).squeeze(-1)
+ context_val_at = F.interpolate(
+ context_val_at.unsqueeze(-1), size=(value_layer1.shape[2],
+ 1)).squeeze(-1)
context_val_at = self.transpose_for_scores(
context_val_at.transpose((0, 2, 1)))
value_layer1 = value_layer1 + context_val_at
@@ -388,13 +387,12 @@ def do_attention(query_layer, key_layer, value_layer, attention_mask,
class BertEntOutput(nn.Layer):
def __init__(
- self,
- bi_hidden_size,
- hidden_size,
- v_hidden_size,
- v_hidden_dropout_prob,
- hidden_dropout_prob,
- ):
+ self,
+ bi_hidden_size,
+ hidden_size,
+ v_hidden_size,
+ v_hidden_dropout_prob,
+ hidden_dropout_prob, ):
super(BertEntOutput, self).__init__()
self.dense1 = nn.Linear(bi_hidden_size, v_hidden_size)
@@ -410,14 +408,13 @@ def __init__(
self.dropout3 = nn.Dropout(hidden_dropout_prob)
def forward(
- self,
- hidden_states1,
- input_tensor1,
- hidden_states2,
- input_tensor2,
- hidden_states3,
- input_tensor3,
- ):
+ self,
+ hidden_states1,
+ input_tensor1,
+ hidden_states2,
+ input_tensor2,
+ hidden_states3,
+ input_tensor3, ):
context_state1 = self.dense1(hidden_states1)
context_state1 = self.dropout1(context_state1)
@@ -448,23 +445,22 @@ def __init__(self, hidden_size, intermediate_size, hidden_act,
hidden_dropout_prob)
def forward(self, hidden_states, attention_mask):
- attention_output, attention_probs = self.attention(
- hidden_states, attention_mask)
+ attention_output, attention_probs = self.attention(hidden_states,
+ attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, attention_probs
class BertConnectionLayer(nn.Layer):
- def __init__(self, hidden_size, v_hidden_size, a_hidden_size,
- bi_hidden_size, bi_num_attention_heads,
- attention_probs_dropout_prob, v_attention_probs_dropout_prob,
- a_attention_probs_dropout_prob,
- av_attention_probs_dropout_prob,
- at_attention_probs_dropout_prob, intermediate_size,
- v_intermediate_size, a_intermediate_size, hidden_act,
- v_hidden_act, a_hidden_act, hidden_dropout_prob,
- v_hidden_dropout_prob, a_hidden_dropout_prob):
+ def __init__(
+ self, hidden_size, v_hidden_size, a_hidden_size, bi_hidden_size,
+ bi_num_attention_heads, attention_probs_dropout_prob,
+ v_attention_probs_dropout_prob, a_attention_probs_dropout_prob,
+ av_attention_probs_dropout_prob, at_attention_probs_dropout_prob,
+ intermediate_size, v_intermediate_size, a_intermediate_size,
+ hidden_act, v_hidden_act, a_hidden_act, hidden_dropout_prob,
+ v_hidden_dropout_prob, a_hidden_dropout_prob):
super(BertConnectionLayer, self).__init__()
self.ent_attention = BertEntAttention(
hidden_size,
@@ -476,20 +472,17 @@ def __init__(self, hidden_size, v_hidden_size, a_hidden_size,
a_attention_probs_dropout_prob,
av_attention_probs_dropout_prob,
at_attention_probs_dropout_prob,
- bi_num_attention_heads,
- )
+ bi_num_attention_heads, )
self.ent_output = BertEntOutput(
bi_hidden_size,
hidden_size,
v_hidden_size,
v_hidden_dropout_prob,
- hidden_dropout_prob,
- )
+ hidden_dropout_prob, )
- self.v_intermediate = BertIntermediate(v_hidden_size,
- v_intermediate_size,
- v_hidden_act)
+ self.v_intermediate = BertIntermediate(
+ v_hidden_size, v_intermediate_size, v_hidden_act)
self.v_output = BertOutput(v_intermediate_size, v_hidden_size,
v_hidden_dropout_prob)
@@ -498,29 +491,27 @@ def __init__(self, hidden_size, v_hidden_size, a_hidden_size,
self.t_output = BertOutput(intermediate_size, hidden_size,
hidden_dropout_prob)
- self.a_intermediate = BertIntermediate(a_hidden_size,
- a_intermediate_size,
- a_hidden_act)
+ self.a_intermediate = BertIntermediate(
+ a_hidden_size, a_intermediate_size, a_hidden_act)
self.a_output = BertOutput(a_intermediate_size, a_hidden_size,
a_hidden_dropout_prob)
def forward(
- self,
- input_tensor1,
- attention_mask1,
- input_tensor2,
- attention_mask2,
- input_tensor3,
- attention_mask3,
- ):
+ self,
+ input_tensor1,
+ attention_mask1,
+ input_tensor2,
+ attention_mask2,
+ input_tensor3,
+ attention_mask3, ):
ent_output1, ent_output2, ent_output3 = self.ent_attention(
input_tensor1, attention_mask1, input_tensor2, attention_mask2,
input_tensor3, attention_mask3)
attention_output1, attention_output2, attention_output3 = self.ent_output(
- ent_output1, input_tensor1, ent_output2, input_tensor2, ent_output3,
- input_tensor3)
+ ent_output1, input_tensor1, ent_output2, input_tensor2,
+ ent_output3, input_tensor3)
intermediate_output1 = self.v_intermediate(attention_output1)
layer_output1 = self.v_output(intermediate_output1, attention_output1)
@@ -538,39 +529,39 @@ class BertEncoder(nn.Layer):
"""
    ActBert Encoder, consisting of 3 pathways of multi-BertLayers and BertConnectionLayers.
"""
+
def __init__(
- self,
- v_ent_attention_id,
- t_ent_attention_id,
- a_ent_attention_id,
- fixed_t_layer,
- fixed_v_layer,
- hidden_size,
- v_hidden_size,
- a_hidden_size,
- bi_hidden_size,
- intermediate_size,
- v_intermediate_size,
- a_intermediate_size,
- hidden_act,
- v_hidden_act,
- a_hidden_act,
- hidden_dropout_prob,
- v_hidden_dropout_prob,
- a_hidden_dropout_prob,
- attention_probs_dropout_prob,
- v_attention_probs_dropout_prob,
- a_attention_probs_dropout_prob,
- av_attention_probs_dropout_prob,
- at_attention_probs_dropout_prob,
- num_attention_heads,
- v_num_attention_heads,
- a_num_attention_heads,
- bi_num_attention_heads,
- num_hidden_layers,
- v_num_hidden_layers,
- a_num_hidden_layers,
- ):
+ self,
+ v_ent_attention_id,
+ t_ent_attention_id,
+ a_ent_attention_id,
+ fixed_t_layer,
+ fixed_v_layer,
+ hidden_size,
+ v_hidden_size,
+ a_hidden_size,
+ bi_hidden_size,
+ intermediate_size,
+ v_intermediate_size,
+ a_intermediate_size,
+ hidden_act,
+ v_hidden_act,
+ a_hidden_act,
+ hidden_dropout_prob,
+ v_hidden_dropout_prob,
+ a_hidden_dropout_prob,
+ attention_probs_dropout_prob,
+ v_attention_probs_dropout_prob,
+ a_attention_probs_dropout_prob,
+ av_attention_probs_dropout_prob,
+ at_attention_probs_dropout_prob,
+ num_attention_heads,
+ v_num_attention_heads,
+ a_num_attention_heads,
+ bi_num_attention_heads,
+ num_hidden_layers,
+ v_num_hidden_layers,
+ a_num_hidden_layers, ):
super(BertEncoder, self).__init__()
self.v_ent_attention_id = v_ent_attention_id
self.t_ent_attention_id = t_ent_attention_id
@@ -603,20 +594,20 @@ def __init__(
self.a_layer = nn.LayerList(
[copy.deepcopy(a_layer) for _ in range(a_num_hidden_layers)]) #3
self.c_layer = nn.LayerList([
- copy.deepcopy(connect_layer) for _ in range(len(v_ent_attention_id))
+ copy.deepcopy(connect_layer)
+ for _ in range(len(v_ent_attention_id))
] #2 [0,1]
)
def forward(
- self,
- txt_embedding,
- image_embedding,
- action_embedding,
- txt_attention_mask,
- image_attention_mask,
- action_attention_mask,
- output_all_encoded_layers=True,
- ):
+ self,
+ txt_embedding,
+ image_embedding,
+ action_embedding,
+ txt_attention_mask,
+ image_attention_mask,
+ action_attention_mask,
+ output_all_encoded_layers=True, ):
v_start, a_start, t_start = 0, 0, 0
count = 0
all_encoder_layers_t = []
@@ -699,6 +690,7 @@ class BertPooler(nn.Layer):
""" "Pool" the model by simply taking the hidden state corresponding
to the first token.
"""
+
def __init__(self, hidden_size, bi_hidden_size):
super(BertPooler, self).__init__()
self.dense = nn.Linear(hidden_size, bi_hidden_size)
@@ -713,43 +705,42 @@ def forward(self, hidden_states):
class BertModel(nn.Layer):
def __init__(
- self,
- vocab_size,
- max_position_embeddings,
- type_vocab_size,
- v_feature_size,
- a_feature_size,
- num_hidden_layers,
- v_num_hidden_layers,
- a_num_hidden_layers,
- v_ent_attention_id,
- t_ent_attention_id,
- a_ent_attention_id,
- fixed_t_layer,
- fixed_v_layer,
- hidden_size,
- v_hidden_size,
- a_hidden_size,
- bi_hidden_size,
- intermediate_size,
- v_intermediate_size,
- a_intermediate_size,
- hidden_act,
- v_hidden_act,
- a_hidden_act,
- hidden_dropout_prob,
- v_hidden_dropout_prob,
- a_hidden_dropout_prob,
- attention_probs_dropout_prob,
- v_attention_probs_dropout_prob,
- a_attention_probs_dropout_prob,
- av_attention_probs_dropout_prob,
- at_attention_probs_dropout_prob,
- num_attention_heads,
- v_num_attention_heads,
- a_num_attention_heads,
- bi_num_attention_heads,
- ):
+ self,
+ vocab_size,
+ max_position_embeddings,
+ type_vocab_size,
+ v_feature_size,
+ a_feature_size,
+ num_hidden_layers,
+ v_num_hidden_layers,
+ a_num_hidden_layers,
+ v_ent_attention_id,
+ t_ent_attention_id,
+ a_ent_attention_id,
+ fixed_t_layer,
+ fixed_v_layer,
+ hidden_size,
+ v_hidden_size,
+ a_hidden_size,
+ bi_hidden_size,
+ intermediate_size,
+ v_intermediate_size,
+ a_intermediate_size,
+ hidden_act,
+ v_hidden_act,
+ a_hidden_act,
+ hidden_dropout_prob,
+ v_hidden_dropout_prob,
+ a_hidden_dropout_prob,
+ attention_probs_dropout_prob,
+ v_attention_probs_dropout_prob,
+ a_attention_probs_dropout_prob,
+ av_attention_probs_dropout_prob,
+ at_attention_probs_dropout_prob,
+ num_attention_heads,
+ v_num_attention_heads,
+ a_num_attention_heads,
+ bi_num_attention_heads, ):
super(BertModel, self).__init__()
        # initialize word embedding
self.embeddings = BertEmbeddings(vocab_size, max_position_embeddings,
@@ -780,17 +771,16 @@ def __init__(
self.a_pooler = BertPooler(a_hidden_size, bi_hidden_size)
def forward(
- self,
- text_ids,
- action_feat,
- image_feat,
- image_loc,
- token_type_ids=None,
- text_mask=None,
- image_mask=None,
- action_mask=None,
- output_all_encoded_layers=False,
- ):
+ self,
+ text_ids,
+ action_feat,
+ image_feat,
+ image_loc,
+ token_type_ids=None,
+ text_mask=None,
+ image_mask=None,
+ action_mask=None,
+ output_all_encoded_layers=False, ):
"""
        text_ids: input text ids. Shape: [batch_size, sequence_length]
action_feat: input action feature. Shape: [batch_size, action_length, action_feature_dim]
@@ -807,12 +797,13 @@ def forward(
if token_type_ids is None:
token_type_ids = paddle.zeros_like(text_ids)
if image_mask is None:
- image_mask = paddle.ones(image_feat.shape[0],
- image_feat.shape[1]).astype(text_ids.dtype)
+ image_mask = paddle.ones(
+ image_feat.shape[0],
+ image_feat.shape[1]).astype(text_ids.dtype)
if action_mask is None:
- action_mask = paddle.ones(action_feat.shape[0],
- action_feat.shape[1]).astype(
- text_ids.dtype)
+ action_mask = paddle.ones(
+ action_feat.shape[0],
+ action_feat.shape[1]).astype(text_ids.dtype)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
@@ -827,7 +818,8 @@ def forward(
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
def set_mask(extended_attention_mask):
- extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
+ extended_attention_mask = (
+ 1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
extended_text_mask = set_mask(extended_text_mask)
@@ -850,8 +842,7 @@ def set_mask(extended_attention_mask):
extended_text_mask,
extended_image_mask,
extended_action_mask,
- output_all_encoded_layers=output_all_encoded_layers,
- )
+ output_all_encoded_layers=output_all_encoded_layers, )
sequence_output_t = encoded_layers_t[-1] #get item from list
sequence_output_v = encoded_layers_v[-1]
@@ -875,8 +866,8 @@ class BertPredictionHeadTransform(nn.Layer):
def __init__(self, hidden_size, hidden_act):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
- if isinstance(hidden_act, str) or (sys.version_info[0] == 2
- and isinstance(hidden_act, str)):
+        if isinstance(hidden_act, str):  # the original Py2 `unicode` branch became a redundant str check after porting
self.transform_act_fn = ACT2FN[hidden_act]
else:
self.transform_act_fn = hidden_act
@@ -975,48 +966,48 @@ def forward(self, sequence_output_t, sequence_output_v, sequence_output_a,
class BertForMultiModalPreTraining(nn.Layer):
"""BERT model with multi modal pre-training heads.
"""
+
def __init__(
- self,
- vocab_size=30522,
- max_position_embeddings=512,
- type_vocab_size=2,
- v_target_size=1601,
- a_target_size=700,
- v_feature_size=2048,
- a_feature_size=2048,
- num_hidden_layers=12,
- v_num_hidden_layers=2,
- a_num_hidden_layers=3,
- t_ent_attention_id=[10, 11],
- v_ent_attention_id=[0, 1],
- a_ent_attention_id=[0, 1],
- fixed_t_layer=0,
- fixed_v_layer=0,
- hidden_size=768,
- v_hidden_size=1024,
- a_hidden_size=768,
- bi_hidden_size=1024,
- intermediate_size=3072,
- v_intermediate_size=1024,
- a_intermediate_size=3072,
- hidden_act="gelu",
- v_hidden_act="gelu",
- a_hidden_act="gelu",
- hidden_dropout_prob=0.1,
- v_hidden_dropout_prob=0.1,
- a_hidden_dropout_prob=0.1,
- attention_probs_dropout_prob=0.1,
- v_attention_probs_dropout_prob=0.1,
- a_attention_probs_dropout_prob=0.1,
- av_attention_probs_dropout_prob=0.1,
- at_attention_probs_dropout_prob=0.1,
- num_attention_heads=12,
- v_num_attention_heads=8,
- a_num_attention_heads=12,
- bi_num_attention_heads=8,
- fusion_method="mul",
- pretrained=None,
- ):
+ self,
+ vocab_size=30522,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ v_target_size=1601,
+ a_target_size=700,
+ v_feature_size=2048,
+ a_feature_size=2048,
+ num_hidden_layers=12,
+ v_num_hidden_layers=2,
+ a_num_hidden_layers=3,
+ t_ent_attention_id=[10, 11],
+ v_ent_attention_id=[0, 1],
+ a_ent_attention_id=[0, 1],
+ fixed_t_layer=0,
+ fixed_v_layer=0,
+ hidden_size=768,
+ v_hidden_size=1024,
+ a_hidden_size=768,
+ bi_hidden_size=1024,
+ intermediate_size=3072,
+ v_intermediate_size=1024,
+ a_intermediate_size=3072,
+ hidden_act="gelu",
+ v_hidden_act="gelu",
+ a_hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ v_hidden_dropout_prob=0.1,
+ a_hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ v_attention_probs_dropout_prob=0.1,
+ a_attention_probs_dropout_prob=0.1,
+ av_attention_probs_dropout_prob=0.1,
+ at_attention_probs_dropout_prob=0.1,
+ num_attention_heads=12,
+ v_num_attention_heads=8,
+ a_num_attention_heads=12,
+ bi_num_attention_heads=8,
+ fusion_method="mul",
+ pretrained=None, ):
"""
vocab_size: vocabulary size. Default: 30522.
max_position_embeddings: max position id. Default: 512.
@@ -1097,8 +1088,7 @@ def __init__(
num_attention_heads,
v_num_attention_heads,
a_num_attention_heads,
- bi_num_attention_heads,
- )
+ bi_num_attention_heads, )
self.cls = BertPreTrainingHeads(
hidden_size, v_hidden_size, a_hidden_size, bi_hidden_size,
hidden_act, v_hidden_act, a_hidden_act, v_target_size,
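A note on the mask idiom the BertModel hunks above keep reflowing: a 2D padding mask is broadcast to [batch_size, 1, 1, seq_len] and converted to an additive bias, so the softmax assigns effectively zero weight to padded keys. A minimal runnable sketch of that idiom, assuming only paddle; the helper name is illustrative, not part of the patch:

import paddle

def make_extended_mask(mask_2d):
    # mask_2d: [batch_size, seq_len], 1 for real tokens, 0 for padding.
    # Broadcast to [batch_size, 1, 1, seq_len] so it can be added to raw
    # attention scores shaped [batch_size, num_heads, query_len, key_len].
    extended = mask_2d.unsqueeze([1, 2]).astype('float32')
    # Kept positions contribute a 0.0 bias; padded positions get -10000.0,
    # which the softmax turns into a near-zero attention weight.
    return (1.0 - extended) * -10000.0

mask = paddle.to_tensor([[1, 1, 1, 0]])
print(make_extended_mask(mask).shape)  # [1, 1, 1, 4]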
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/adds.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/adds.py
old mode 100644
new mode 100755
index 21cd212cb..24010a517
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/adds.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/adds.py
@@ -63,13 +63,14 @@ def convt_bn_relu(in_channels,
bias = not bn
layers = []
layers.append(
- nn.Conv2DTranspose(in_channels,
- out_channels,
- kernel_size,
- stride,
- padding,
- output_padding,
- bias_attr=bias))
+ nn.Conv2DTranspose(
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ padding,
+ output_padding,
+ bias_attr=bias))
if bn:
layers.append(nn.BatchNorm2D(out_channels))
@@ -114,15 +115,14 @@ def get_translation_matrix(translation_vector):
"""
t = translation_vector.reshape([-1, 3, 1])
gather_object = paddle.stack([
- paddle.zeros([
- translation_vector.shape[0],
- ], paddle.float32),
- paddle.ones([
- translation_vector.shape[0],
- ], paddle.float32),
- paddle.squeeze(t[:, 0], axis=-1),
- paddle.squeeze(t[:, 1], axis=-1),
- paddle.squeeze(t[:, 2], axis=-1),
+ paddle.zeros([translation_vector.shape[0], ], paddle.float32),
+ paddle.ones([translation_vector.shape[0], ], paddle.float32),
+ paddle.squeeze(
+ t[:, 0], axis=-1),
+ paddle.squeeze(
+ t[:, 1], axis=-1),
+ paddle.squeeze(
+ t[:, 2], axis=-1),
])
gather_index = paddle.to_tensor([
[1],
@@ -174,21 +174,19 @@ def rot_from_axisangle(vec):
zxC = z * xC
gather_object = paddle.stack([
- paddle.squeeze(x * xC + ca, axis=(-1, -2)),
- paddle.squeeze(xyC - zs, axis=(-1, -2)),
- paddle.squeeze(zxC + ys, axis=(-1, -2)),
- paddle.squeeze(xyC + zs, axis=(-1, -2)),
- paddle.squeeze(y * yC + ca, axis=(-1, -2)),
- paddle.squeeze(yzC - xs, axis=(-1, -2)),
- paddle.squeeze(zxC - ys, axis=(-1, -2)),
- paddle.squeeze(yzC + xs, axis=(-1, -2)),
- paddle.squeeze(z * zC + ca, axis=(-1, -2)),
- paddle.ones([
- vec.shape[0],
- ], dtype=paddle.float32),
- paddle.zeros([
- vec.shape[0],
- ], dtype=paddle.float32)
+ paddle.squeeze(
+ x * xC + ca, axis=(-1, -2)), paddle.squeeze(
+ xyC - zs, axis=(-1, -2)), paddle.squeeze(
+ zxC + ys, axis=(-1, -2)), paddle.squeeze(
+ xyC + zs, axis=(-1, -2)), paddle.squeeze(
+ y * yC + ca, axis=(-1, -2)), paddle.squeeze(
+ yzC - xs, axis=(-1, -2)), paddle.squeeze(
+ zxC - ys, axis=(-1, -2)), paddle.squeeze(
+ yzC + xs, axis=(-1, -2)),
+ paddle.squeeze(
+ z * zC + ca, axis=(-1, -2)), paddle.ones(
+ [vec.shape[0], ], dtype=paddle.float32), paddle.zeros(
+ [vec.shape[0], ], dtype=paddle.float32)
])
gather_index = paddle.to_tensor([
[0],
@@ -226,12 +224,10 @@ def get_smooth_loss(disp, img):
grad_disp_x = paddle.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:])
grad_disp_y = paddle.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :])
- grad_img_x = paddle.mean(paddle.abs(img[:, :, :, :-1] - img[:, :, :, 1:]),
- 1,
- keepdim=True)
- grad_img_y = paddle.mean(paddle.abs(img[:, :, :-1, :] - img[:, :, 1:, :]),
- 1,
- keepdim=True)
+ grad_img_x = paddle.mean(
+ paddle.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)
+ grad_img_y = paddle.mean(
+ paddle.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)
grad_disp_x *= paddle.exp(-grad_img_x)
grad_disp_y *= paddle.exp(-grad_img_y)
@@ -241,23 +237,21 @@ def get_smooth_loss(disp, img):
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
- return nn.Conv2D(in_planes,
- out_planes,
- kernel_size=3,
- stride=stride,
- padding=dilation,
- groups=groups,
- bias_attr=False,
- dilation=dilation)
+ return nn.Conv2D(
+ in_planes,
+ out_planes,
+ kernel_size=3,
+ stride=stride,
+ padding=dilation,
+ groups=groups,
+ bias_attr=False,
+ dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
- return nn.Conv2D(in_planes,
- out_planes,
- kernel_size=1,
- stride=stride,
- bias_attr=False)
+ return nn.Conv2D(
+ in_planes, out_planes, kernel_size=1, stride=stride, bias_attr=False)
def resnet_multiimage_input(num_layers, num_input_images=1):
@@ -272,10 +266,8 @@ def resnet_multiimage_input(num_layers, num_input_images=1):
block_type = {18: BasicBlock, 50: Bottleneck}[num_layers]
- model = ResNetMultiImageInput(block_type,
- num_layers,
- blocks,
- num_input_images=num_input_images)
+ model = ResNetMultiImageInput(
+ block_type, num_layers, blocks, num_input_images=num_input_images)
model.init_weights()
return model
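For context on resnet_multiimage_input above: feeding several frames to a ResNet only requires widening the stem convolution to num_input_images * 3 channels; everything downstream is unchanged. A small sketch under that assumption, with illustrative shapes:

import paddle
import paddle.nn as nn

num_input_images = 2
# The stem accepts num_input_images * 3 channels instead of 3; remaining
# layers are untouched, so single-image pretrained weights can be reused
# by repeating the stem kernel across the extra frames.
stem = nn.Conv2D(num_input_images * 3, 64, kernel_size=7, stride=2,
                 padding=3, bias_attr=False)
frames = paddle.randn([1, num_input_images * 3, 192, 640])
print(stem(frames).shape)  # [1, 64, 96, 320]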
@@ -283,6 +275,7 @@ def resnet_multiimage_input(num_layers, num_input_images=1):
class ConvBlock(nn.Layer):
"""Layer to perform a convolution followed by ELU
"""
+
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
@@ -298,6 +291,7 @@ def forward(self, x):
class Conv3x3(nn.Layer):
"""Layer to pad and convolve input
"""
+
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3, self).__init__()
@@ -316,6 +310,7 @@ def forward(self, x):
class BackprojectDepth(nn.Layer):
"""Layer to transform a depth image into a point cloud
"""
+
def __init__(self, batch_size, height, width):
super(BackprojectDepth, self).__init__()
@@ -323,12 +318,11 @@ def __init__(self, batch_size, height, width):
self.height = height
self.width = width
- meshgrid = np.meshgrid(range(self.width),
- range(self.height),
- indexing='xy')
+ meshgrid = np.meshgrid(
+ range(self.width), range(self.height), indexing='xy')
id_coords = np.stack(meshgrid, axis=0).astype(np.float32)
- self.id_coords = self.create_parameter(shape=list(id_coords.shape),
- dtype=paddle.float32)
+ self.id_coords = self.create_parameter(
+ shape=list(id_coords.shape), dtype=paddle.float32)
self.id_coords.set_value(id_coords)
self.add_parameter("id_coords", self.id_coords)
self.id_coords.stop_gradient = True
@@ -341,9 +335,7 @@ def __init__(self, batch_size, height, width):
pix_coords = paddle.unsqueeze(
paddle.stack([
- self.id_coords[0].reshape([
- -1,
- ]), self.id_coords[1].reshape([
+ self.id_coords[0].reshape([-1, ]), self.id_coords[1].reshape([
-1,
])
], 0), 0)
@@ -365,6 +357,7 @@ def forward(self, depth, inv_K):
class Project3D(nn.Layer):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
+
def __init__(self, batch_size, height, width, eps=1e-7):
super(Project3D, self).__init__()
@@ -378,8 +371,8 @@ def forward(self, points, K, T):
cam_points = paddle.matmul(P, points)
- pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(1) +
- self.eps)
+ pix_coords = cam_points[:, :2, :] / (
+ cam_points[:, 2, :].unsqueeze(1) + self.eps)
pix_coords = pix_coords.reshape(
[self.batch_size, 2, self.height, self.width])
pix_coords = pix_coords.transpose([0, 2, 3, 1])
@@ -392,6 +385,7 @@ def forward(self, points, K, T):
class SSIM(nn.Layer):
"""Layer to compute the SSIM loss between a pair of images
"""
+
def __init__(self):
super(SSIM, self).__init__()
self.mu_x_pool = nn.AvgPool2D(3, 1, exclusive=False)
@@ -426,15 +420,17 @@ class ResNetMultiImageInput(ResNet):
"""Constructs a resnet model with varying number of input images.
Adapted from https://github.com/pypaddle/vision/blob/master/paddlevision/models/resnet.py
"""
+
def __init__(self, block, depth, layers, num_input_images=1):
super(ResNetMultiImageInput, self).__init__(block, depth)
self.inplanes = 64
- self.conv1 = nn.Conv2D(num_input_images * 3,
- 64,
- kernel_size=7,
- stride=2,
- padding=3,
- bias_attr=False)
+ self.conv1 = nn.Conv2D(
+ num_input_images * 3,
+ 64,
+ kernel_size=7,
+ stride=2,
+ padding=3,
+ bias_attr=False)
self.bn1 = nn.BatchNorm2D(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
@@ -446,9 +442,8 @@ def __init__(self, block, depth, layers, num_input_images=1):
def init_weights(self):
for layer in self.sublayers(include_self=True):
if isinstance(layer, nn.Conv2D):
- kaiming_normal_(layer.weight,
- mode='fan_out',
- nonlinearity='relu')
+ kaiming_normal_(
+ layer.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(layer, nn.BatchNorm2D):
ones_(layer.weight)
zeros_(layer.bias)
@@ -471,6 +466,7 @@ class ConvBNLayer(nn.Layer):
    are explicitly declared in the ```init_weights``` method.
"""
+
def __init__(self,
in_channels,
out_channels,
@@ -480,13 +476,14 @@ def __init__(self,
act=None,
name=None):
super(ConvBNLayer, self).__init__()
- self._conv = Conv2D(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=kernel_size,
- stride=stride,
- padding=(kernel_size - 1) // 2,
- groups=groups,
- bias_attr=False)
+ self._conv = Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2,
+ groups=groups,
+ bias_attr=False)
self._act = act
@@ -657,8 +654,8 @@ def forward(self, input_features):
x = paddle.concat(x, 1)
x = self.convs[("upconv", i, 1)](x)
if i in self.scales:
- outputs[("disp", i)] = self.sigmoid(self.convs[("dispconv",
- i)](x))
+ outputs[("disp",
+ i)] = self.sigmoid(self.convs[("dispconv", i)](x))
return outputs
@@ -716,6 +713,7 @@ def forward(self, input_features):
class ResnetEncoder(nn.Layer):
"""Pypaddle module for a resnet encoder
"""
+
def __init__(self, num_layers, pretrained=False, num_input_images=1):
super(ResnetEncoder, self).__init__()
@@ -730,8 +728,8 @@ def __init__(self, num_layers, pretrained=False, num_input_images=1):
}
if num_layers not in resnets:
- raise ValueError(
- "{} is not a valid number of resnet layers".format(num_layers))
+ raise ValueError("{} is not a valid number of resnet layers".format(
+ num_layers))
if num_input_images > 1:
self.encoder = resnet_multiimage_input(num_layers, pretrained,
@@ -745,12 +743,8 @@ def __init__(self, num_layers, pretrained=False, num_input_images=1):
######################################
# night public first conv
######################################
- self.conv1 = nn.Conv2D(3,
- 64,
- kernel_size=7,
- stride=2,
- padding=3,
- bias_attr=False)
+ self.conv1 = nn.Conv2D(
+ 3, 64, kernel_size=7, stride=2, padding=3, bias_attr=False)
self.bn1 = nn.BatchNorm2D(64)
self.relu = nn.ReLU() # NOTE
@@ -772,36 +766,41 @@ def __init__(self, num_layers, pretrained=False, num_input_images=1):
######################################
# shared decoder (small decoder), use a simple de-conv to upsample the features with no skip connection
######################################
- self.convt5 = convt_bn_relu(in_channels=512,
- out_channels=256,
- kernel_size=3,
- stride=2,
- padding=1,
- output_padding=1)
- self.convt4 = convt_bn_relu(in_channels=256,
- out_channels=128,
- kernel_size=3,
- stride=2,
- padding=1,
- output_padding=1)
- self.convt3 = convt_bn_relu(in_channels=128,
- out_channels=64,
- kernel_size=3,
- stride=2,
- padding=1,
- output_padding=1)
- self.convt2 = convt_bn_relu(in_channels=64,
- out_channels=64,
- kernel_size=3,
- stride=2,
- padding=1,
- output_padding=1)
- self.convt1 = convt_bn_relu(in_channels=64,
- out_channels=64,
- kernel_size=3,
- stride=2,
- padding=1,
- output_padding=1)
+ self.convt5 = convt_bn_relu(
+ in_channels=512,
+ out_channels=256,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ output_padding=1)
+ self.convt4 = convt_bn_relu(
+ in_channels=256,
+ out_channels=128,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ output_padding=1)
+ self.convt3 = convt_bn_relu(
+ in_channels=128,
+ out_channels=64,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ output_padding=1)
+ self.convt2 = convt_bn_relu(
+ in_channels=64,
+ out_channels=64,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ output_padding=1)
+ self.convt1 = convt_bn_relu(
+ in_channels=64,
+ out_channels=64,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ output_padding=1)
self.convtf = nn.Conv2D(64, 3, kernel_size=1, stride=1, padding=0)
def forward(self, input_image, is_night):
@@ -882,6 +881,7 @@ def forward(self, input_image, is_night):
class ResnetEncoder_pose(nn.Layer):
"""Pypaddle module for a resnet encoder
"""
+
def __init__(self, num_layers, pretrained=False, num_input_images=1):
super(ResnetEncoder_pose, self).__init__()
@@ -895,8 +895,8 @@ def __init__(self, num_layers, pretrained=False, num_input_images=1):
}
if num_layers not in resnets:
- raise ValueError(
- "{} is not a valid number of resnet layers".format(num_layers))
+ raise ValueError("{} is not a valid number of resnet layers".format(
+ num_layers))
if num_input_images > 1:
self.encoder = resnet_multiimage_input(num_layers, num_input_images)
@@ -971,9 +971,10 @@ def __init__(self,
if self.pose_model_type == "separate_resnet":
self.pose_encoder = ResnetEncoder_pose(
self.num_layers, num_input_images=self.num_pose_frames)
- self.pose = PoseDecoder(self.pose_encoder.num_ch_enc,
- num_input_features=1,
- num_frames_to_predict_for=2)
+ self.pose = PoseDecoder(
+ self.pose_encoder.num_ch_enc,
+ num_input_features=1,
+ num_frames_to_predict_for=2)
self.backproject_depth = {}
self.project_3d = {}
@@ -981,8 +982,8 @@ def __init__(self,
h = self.height // (2**scale)
w = self.width // (2**scale)
- self.backproject_depth[scale] = BackprojectDepth(
- self.batch_size, h, w)
+ self.backproject_depth[scale] = BackprojectDepth(self.batch_size, h,
+ w)
self.project_3d[scale] = Project3D(batch_size, h, w)
def init_weights(self):
@@ -1072,8 +1073,9 @@ def predict_poses(self, inputs, is_night):
if self.pose_model_type == "separate_resnet":
pose_inputs = [
- self.pose_encoder(paddle.concat(pose_inputs,
- axis=1))
+ self.pose_encoder(
+ paddle.concat(
+ pose_inputs, axis=1))
]
axisangle, translation = self.pose(pose_inputs)
@@ -1098,9 +1100,8 @@ def generate_images_pred(self, inputs, outputs, is_night):
if self.v1_multiscale:
source_scale = scale
else:
- disp = F.interpolate(disp, [height, width],
- mode="bilinear",
- align_corners=False)
+ disp = F.interpolate(
+ disp, [height, width], mode="bilinear", align_corners=False)
source_scale = 0
_, depth = disp_to_depth(disp, self.min_depth, self.max_depth)
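The get_smooth_loss hunk earlier in this file is pure reformatting; the edge-aware smoothness term itself is compact enough to restate. A self-contained sketch mirroring the patched body, assuming disp and img are NCHW paddle tensors:

import paddle

def smooth_loss(disp, img):
    # First-order disparity gradients along x and y.
    grad_disp_x = paddle.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:])
    grad_disp_y = paddle.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :])
    # Channel-mean image gradients; strong image edges damp the penalty
    # there, since depth is allowed to be discontinuous at object edges.
    grad_img_x = paddle.mean(
        paddle.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)
    grad_img_y = paddle.mean(
        paddle.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)
    grad_disp_x *= paddle.exp(-grad_img_x)
    grad_disp_y *= paddle.exp(-grad_img_y)
    return grad_disp_x.mean() + grad_disp_y.mean()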
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/agcn.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/agcn.py
old mode 100644
new mode 100755
index 9f870c66b..e54ad029d
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/agcn.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/agcn.py
@@ -21,13 +21,15 @@
class GCN(nn.Layer):
def __init__(self, in_channels, out_channels, vertex_nums=25, stride=1):
super(GCN, self).__init__()
- self.conv1 = nn.Conv2D(in_channels=in_channels,
- out_channels=3 * out_channels,
- kernel_size=1,
- stride=1)
- self.conv2 = nn.Conv2D(in_channels=vertex_nums * 3,
- out_channels=vertex_nums,
- kernel_size=1)
+ self.conv1 = nn.Conv2D(
+ in_channels=in_channels,
+ out_channels=3 * out_channels,
+ kernel_size=1,
+ stride=1)
+ self.conv2 = nn.Conv2D(
+ in_channels=vertex_nums * 3,
+ out_channels=vertex_nums,
+ kernel_size=1)
def forward(self, x):
# x --- N,C,T,V
@@ -55,23 +57,24 @@ def __init__(self,
self.out_channels = out_channels
self.bn_res = nn.BatchNorm2D(out_channels)
- self.conv_res = nn.Conv2D(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=1,
- stride=(stride, 1))
+ self.conv_res = nn.Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ stride=(stride, 1))
self.gcn = GCN(in_channels=in_channels,
out_channels=out_channels,
vertex_nums=vertex_nums)
self.tcn = nn.Sequential(
nn.BatchNorm2D(out_channels),
nn.ReLU(),
- nn.Conv2D(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=(temporal_size, 1),
- padding=((temporal_size - 1) // 2, 0),
- stride=(stride, 1)),
- nn.BatchNorm2D(out_channels),
- )
+ nn.Conv2D(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=(temporal_size, 1),
+ padding=((temporal_size - 1) // 2, 0),
+ stride=(stride, 1)),
+ nn.BatchNorm2D(out_channels), )
def forward(self, x):
if self.residual:
@@ -92,23 +95,35 @@ class AGCN(nn.Layer):
Args:
        in_channels: int, number of channels of the vertex coordinates: 2 for (x, y), 3 for (x, y, z). Default: 2.
"""
+
def __init__(self, in_channels=2, **kwargs):
super(AGCN, self).__init__()
self.data_bn = nn.BatchNorm1D(25 * 2)
self.agcn = nn.Sequential(
- Block(in_channels=in_channels,
- out_channels=64,
- residual=False,
- **kwargs), Block(in_channels=64, out_channels=64, **kwargs),
- Block(in_channels=64, out_channels=64, **kwargs),
- Block(in_channels=64, out_channels=64, **kwargs),
- Block(in_channels=64, out_channels=128, stride=2, **kwargs),
- Block(in_channels=128, out_channels=128, **kwargs),
- Block(in_channels=128, out_channels=128, **kwargs),
- Block(in_channels=128, out_channels=256, stride=2, **kwargs),
- Block(in_channels=256, out_channels=256, **kwargs),
- Block(in_channels=256, out_channels=256, **kwargs))
+ Block(
+ in_channels=in_channels,
+ out_channels=64,
+ residual=False,
+ **kwargs),
+ Block(
+ in_channels=64, out_channels=64, **kwargs),
+ Block(
+ in_channels=64, out_channels=64, **kwargs),
+ Block(
+ in_channels=64, out_channels=64, **kwargs),
+ Block(
+ in_channels=64, out_channels=128, stride=2, **kwargs),
+ Block(
+ in_channels=128, out_channels=128, **kwargs),
+ Block(
+ in_channels=128, out_channels=128, **kwargs),
+ Block(
+ in_channels=128, out_channels=256, stride=2, **kwargs),
+ Block(
+ in_channels=256, out_channels=256, **kwargs),
+ Block(
+ in_channels=256, out_channels=256, **kwargs))
self.pool = nn.AdaptiveAvgPool2D(output_size=(1, 1))
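The AGCN hunks above operate on [N, C, T, V] tensors (batch, channels, frames, V=25 joints): the GCN mixes information across joints per frame, while the tcn branch convolves over time. A generic single-step graph convolution as a sketch, not the exact AGCN block, assuming a fixed normalized adjacency:

import paddle
import paddle.nn as nn

class SimpleGraphConv(nn.Layer):
    # y[n, :, t, v] = sum_u A[v, u] * W(x[n, :, t, u])
    def __init__(self, in_channels, out_channels, vertex_nums=25):
        super().__init__()
        self.theta = nn.Conv2D(in_channels, out_channels, kernel_size=1)
        # Uniform adjacency as a stand-in for the real skeleton graph.
        self.register_buffer(
            'A', paddle.ones([vertex_nums, vertex_nums]) / vertex_nums)

    def forward(self, x):  # x: [N, C, T, V]
        x = self.theta(x)  # 1x1 conv mixes channels per joint
        return paddle.einsum('nctu,vu->nctv', x, self.A)

x = paddle.randn([2, 2, 16, 25])
print(SimpleGraphConv(2, 64)(x).shape)  # [2, 64, 16, 25]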
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/bmn.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/bmn.py
old mode 100644
new mode 100755
index 200d1920a..91b5d361f
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/bmn.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/bmn.py
@@ -77,9 +77,9 @@ def get_interp1d_mask(tscale, dscale, prop_boundary_ratio, num_sample,
def init_params(name, in_channels, kernel_size):
fan_in = in_channels * kernel_size * 1
k = 1. / math.sqrt(fan_in)
- param_attr = ParamAttr(name=name,
- initializer=paddle.nn.initializer.Uniform(low=-k,
- high=k))
+ param_attr = ParamAttr(
+ name=name, initializer=paddle.nn.initializer.Uniform(
+ low=-k, high=k))
return param_attr
@@ -96,14 +96,13 @@ class BMN(paddle.nn.Layer):
"""
def __init__(
- self,
- tscale,
- dscale,
- prop_boundary_ratio,
- num_sample,
- num_sample_perbin,
- feat_dim=400,
- ):
+ self,
+ tscale,
+ dscale,
+ prop_boundary_ratio,
+ num_sample,
+ num_sample_perbin,
+ feat_dim=400, ):
super(BMN, self).__init__()
#init config
@@ -191,9 +190,9 @@ def __init__(
self.p_conv1_act = paddle.nn.ReLU()
# init to speed up
- sample_mask = get_interp1d_mask(self.tscale, self.dscale,
- self.prop_boundary_ratio,
- self.num_sample, self.num_sample_perbin)
+ sample_mask = get_interp1d_mask(
+ self.tscale, self.dscale, self.prop_boundary_ratio,
+ self.num_sample, self.num_sample_perbin)
self.sample_mask = paddle.to_tensor(sample_mask)
self.sample_mask.stop_gradient = True
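init_params in the BMN hunk reproduces the familiar uniform fan-in rule (bound 1/sqrt(fan_in)) via ParamAttr. A standalone sketch of the same rule for a 1-D conv; the attribute name here is illustrative:

import math
import paddle
from paddle import ParamAttr

def uniform_conv_attr(name, in_channels, kernel_size):
    # fan_in of a 1-D conv filter is in_channels * kernel_size (* 1).
    k = 1.0 / math.sqrt(in_channels * kernel_size)
    return ParamAttr(
        name=name,
        initializer=paddle.nn.initializer.Uniform(low=-k, high=k))

conv = paddle.nn.Conv1D(
    400, 256, 3, weight_attr=uniform_conv_attr('demo_weights', 400, 3))
print(conv.weight.shape)  # [256, 400, 3]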
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/cfbi.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/cfbi.py
old mode 100644
new mode 100755
index 5fbf044b7..8c9cb89dd
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/cfbi.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/cfbi.py
@@ -22,30 +22,29 @@
class FPN(nn.Layer):
"""FPN Layer"""
+
def __init__(self, in_dim_4x, in_dim_8x, in_dim_16x, out_dim):
super(FPN, self).__init__()
self.toplayer = self._make_layer(in_dim_16x, out_dim)
self.latlayer1 = self._make_layer(in_dim_8x, out_dim)
self.latlayer2 = self._make_layer(in_dim_4x, out_dim)
- self.smooth1 = self._make_layer(out_dim,
- out_dim,
- kernel_size=3,
- padding=1)
- self.smooth2 = self._make_layer(out_dim,
- out_dim,
- kernel_size=3,
- padding=1)
+ self.smooth1 = self._make_layer(
+ out_dim, out_dim, kernel_size=3, padding=1)
+ self.smooth2 = self._make_layer(
+ out_dim, out_dim, kernel_size=3, padding=1)
def _make_layer(self, in_dim, out_dim, kernel_size=1, padding=0):
return nn.Sequential(
- nn.Conv2D(in_dim,
- out_dim,
- kernel_size=kernel_size,
- stride=1,
- padding=padding,
- bias_attr=False),
- nn.GroupNorm(num_groups=32, num_channels=out_dim))
+ nn.Conv2D(
+ in_dim,
+ out_dim,
+ kernel_size=kernel_size,
+ stride=1,
+ padding=padding,
+ bias_attr=False),
+ nn.GroupNorm(
+ num_groups=32, num_channels=out_dim))
def forward(self, x_4x, x_8x, x_16x):
""" forward function"""
@@ -67,6 +66,7 @@ def forward(self, x_4x, x_8x, x_16x):
@BACKBONES.register()
class CFBI(nn.Layer):
"""CFBI plus backbone"""
+
def __init__(self,
backbone='resnet',
freeze_bn=True,
@@ -75,7 +75,8 @@ def __init__(self,
model_semantic_embedding_dim=256): #,epsilon=1e-05):
super(CFBI, self).__init__()
#self.epsilon = epsilon
- self.feature_extracter = DeepLab(backbone=backbone, freeze_bn=freeze_bn)
+ self.feature_extracter = DeepLab(
+ backbone=backbone, freeze_bn=freeze_bn)
self.fpn = FPN(in_dim_4x=model_aspp_outdim,
in_dim_8x=in_dim_8x,
in_dim_16x=model_aspp_outdim,
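The FPN class in this cfbi.py diff implements the standard top-down merge; only the call formatting changed. A hedged sketch of one merge step (upsample the coarse map, add the lateral projection, then smooth), assuming NCHW tensors and a caller-supplied 3x3 smoothing conv:

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

def fpn_merge(top, lateral, smooth_conv):
    # Upsample the semantically strong, low-resolution map to the
    # lateral's size, sum element-wise, then smooth to reduce aliasing.
    up = F.interpolate(
        top, size=lateral.shape[2:], mode='bilinear', align_corners=True)
    return smooth_conv(up + lateral)

top = paddle.randn([1, 256, 8, 8])
lat = paddle.randn([1, 256, 16, 16])
smooth = nn.Conv2D(256, 256, kernel_size=3, padding=1)
print(fpn_merge(top, lat, smooth).shape)  # [1, 256, 16, 16]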
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/deeplab.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/deeplab.py
old mode 100644
new mode 100755
index c566205ac..70503792a
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/deeplab.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/deeplab.py
@@ -26,6 +26,7 @@ class FrozenBatchNorm2D(nn.Layer):
BatchNorm2D where the batch statistics and the affine parameters
are fixed
"""
+
def __init__(self, n, epsilon=1e-5):
super(FrozenBatchNorm2D, self).__init__()
x1 = paddle.ones([n])
@@ -65,18 +66,17 @@ def __init__(self,
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2D(inplanes, planes, kernel_size=1, bias_attr=False)
self.bn1 = BatchNorm(planes)
- self.conv2 = nn.Conv2D(planes,
- planes,
- kernel_size=3,
- stride=stride,
- dilation=dilation,
- padding=dilation,
- bias_attr=False)
+ self.conv2 = nn.Conv2D(
+ planes,
+ planes,
+ kernel_size=3,
+ stride=stride,
+ dilation=dilation,
+ padding=dilation,
+ bias_attr=False)
self.bn2 = BatchNorm(planes)
- self.conv3 = nn.Conv2D(planes,
- planes * 4,
- kernel_size=1,
- bias_attr=False)
+ self.conv3 = nn.Conv2D(
+ planes, planes * 4, kernel_size=1, bias_attr=False)
self.bn3 = BatchNorm(planes * 4)
self.relu = nn.ReLU()
self.downsample = downsample
@@ -126,40 +126,40 @@ def __init__(self,
raise NotImplementedError
# Modules
- self.conv1 = nn.Conv2D(3,
- 64,
- kernel_size=7,
- stride=2,
- padding=3,
- bias_attr=False)
+ self.conv1 = nn.Conv2D(
+ 3, 64, kernel_size=7, stride=2, padding=3, bias_attr=False)
self.bn1 = BatchNorm(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
- self.layer1 = self._make_layer(block,
- 64,
- layers[0],
- stride=strides[0],
- dilation=dilations[0],
- BatchNorm=BatchNorm)
- self.layer2 = self._make_layer(block,
- 128,
- layers[1],
- stride=strides[1],
- dilation=dilations[1],
- BatchNorm=BatchNorm)
- self.layer3 = self._make_layer(block,
- 256,
- layers[2],
- stride=strides[2],
- dilation=dilations[2],
- BatchNorm=BatchNorm)
- self.layer4 = self._make_MG_unit(block,
- 512,
- blocks=blocks,
- stride=strides[3],
- dilation=dilations[3],
- BatchNorm=BatchNorm)
+ self.layer1 = self._make_layer(
+ block,
+ 64,
+ layers[0],
+ stride=strides[0],
+ dilation=dilations[0],
+ BatchNorm=BatchNorm)
+ self.layer2 = self._make_layer(
+ block,
+ 128,
+ layers[1],
+ stride=strides[1],
+ dilation=dilations[1],
+ BatchNorm=BatchNorm)
+ self.layer3 = self._make_layer(
+ block,
+ 256,
+ layers[2],
+ stride=strides[2],
+ dilation=dilations[2],
+ BatchNorm=BatchNorm)
+ self.layer4 = self._make_MG_unit(
+ block,
+ 512,
+ blocks=blocks,
+ stride=strides[3],
+ dilation=dilations[3],
+ BatchNorm=BatchNorm)
self._init_weight()
def _make_layer(self,
@@ -172,13 +172,13 @@ def _make_layer(self,
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
- nn.Conv2D(self.inplanes,
- planes * block.expansion,
- kernel_size=1,
- stride=stride,
- bias_attr=False),
- BatchNorm(planes * block.expansion),
- )
+ nn.Conv2D(
+ self.inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias_attr=False),
+ BatchNorm(planes * block.expansion), )
layers = []
layers.append(
@@ -187,10 +187,11 @@ def _make_layer(self,
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
- block(self.inplanes,
- planes,
- dilation=dilation,
- BatchNorm=BatchNorm))
+ block(
+ self.inplanes,
+ planes,
+ dilation=dilation,
+ BatchNorm=BatchNorm))
return nn.Sequential(*layers)
@@ -204,30 +205,32 @@ def _make_MG_unit(self,
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
- nn.Conv2D(self.inplanes,
- planes * block.expansion,
- kernel_size=1,
- stride=stride,
- bias_attr=False),
- BatchNorm(planes * block.expansion),
- )
+ nn.Conv2D(
+ self.inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias_attr=False),
+ BatchNorm(planes * block.expansion), )
layers = []
layers.append(
- block(self.inplanes,
- planes,
- stride,
- dilation=blocks[0] * dilation,
- downsample=downsample,
- BatchNorm=BatchNorm))
+ block(
+ self.inplanes,
+ planes,
+ stride,
+ dilation=blocks[0] * dilation,
+ downsample=downsample,
+ BatchNorm=BatchNorm))
self.inplanes = planes * block.expansion
for i in range(1, len(blocks)):
layers.append(
- block(self.inplanes,
- planes,
- stride=1,
- dilation=blocks[i] * dilation,
- BatchNorm=BatchNorm))
+ block(
+ self.inplanes,
+ planes,
+ stride=1,
+ dilation=blocks[i] * dilation,
+ BatchNorm=BatchNorm))
return nn.Sequential(*layers)
@@ -261,13 +264,14 @@ class _ASPPModule(nn.Layer):
def __init__(self, inplanes, planes, kernel_size, padding, dilation,
BatchNorm):
super(_ASPPModule, self).__init__()
- self.atrous_conv = nn.Conv2D(inplanes,
- planes,
- kernel_size=kernel_size,
- stride=1,
- padding=padding,
- dilation=dilation,
- bias_attr=False)
+ self.atrous_conv = nn.Conv2D(
+ inplanes,
+ planes,
+ kernel_size=kernel_size,
+ stride=1,
+ padding=padding,
+ dilation=dilation,
+ bias_attr=False)
self.bn = BatchNorm(planes)
self.relu = nn.ReLU()
@@ -304,35 +308,41 @@ def __init__(self, backbone, output_stride, BatchNorm):
else:
raise NotImplementedError
- self.aspp1 = _ASPPModule(inplanes,
- 256,
- 1,
- padding=0,
- dilation=dilations[0],
- BatchNorm=BatchNorm)
- self.aspp2 = _ASPPModule(inplanes,
- 256,
- 3,
- padding=dilations[1],
- dilation=dilations[1],
- BatchNorm=BatchNorm)
- self.aspp3 = _ASPPModule(inplanes,
- 256,
- 3,
- padding=dilations[2],
- dilation=dilations[2],
- BatchNorm=BatchNorm)
- self.aspp4 = _ASPPModule(inplanes,
- 256,
- 3,
- padding=dilations[3],
- dilation=dilations[3],
- BatchNorm=BatchNorm)
+ self.aspp1 = _ASPPModule(
+ inplanes,
+ 256,
+ 1,
+ padding=0,
+ dilation=dilations[0],
+ BatchNorm=BatchNorm)
+ self.aspp2 = _ASPPModule(
+ inplanes,
+ 256,
+ 3,
+ padding=dilations[1],
+ dilation=dilations[1],
+ BatchNorm=BatchNorm)
+ self.aspp3 = _ASPPModule(
+ inplanes,
+ 256,
+ 3,
+ padding=dilations[2],
+ dilation=dilations[2],
+ BatchNorm=BatchNorm)
+ self.aspp4 = _ASPPModule(
+ inplanes,
+ 256,
+ 3,
+ padding=dilations[3],
+ dilation=dilations[3],
+ BatchNorm=BatchNorm)
self.global_avg_pool = nn.Sequential(
nn.AdaptiveAvgPool2D((1, 1)),
- nn.Conv2D(inplanes, 256, 1, stride=1, bias_attr=False),
- BatchNorm(256), nn.ReLU())
+ nn.Conv2D(
+ inplanes, 256, 1, stride=1, bias_attr=False),
+ BatchNorm(256),
+ nn.ReLU())
self.conv1 = nn.Conv2D(1280, 256, 1, bias_attr=False)
self.bn1 = BatchNorm(256)
self.relu = nn.ReLU()
@@ -345,10 +355,8 @@ def forward(self, x):
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
- x5 = F.interpolate(x5,
- size=x4.shape[2:],
- mode='bilinear',
- align_corners=True)
+ x5 = F.interpolate(
+ x5, size=x4.shape[2:], mode='bilinear', align_corners=True)
x = paddle.concat(x=[x1, x2, x3, x4, x5], axis=1)
x = self.conv1(x)
@@ -381,19 +389,15 @@ def __init__(self, backbone, BatchNorm):
self.relu = nn.ReLU()
self.last_conv = nn.Sequential(
- nn.Conv2D(304,
- 256,
- kernel_size=3,
- stride=1,
- padding=1,
- bias_attr=False), BatchNorm(256), nn.ReLU(),
+ nn.Conv2D(
+ 304, 256, kernel_size=3, stride=1, padding=1, bias_attr=False),
+ BatchNorm(256),
+ nn.ReLU(),
nn.Sequential(),
- nn.Conv2D(256,
- 256,
- kernel_size=3,
- stride=1,
- padding=1,
- bias_attr=False), BatchNorm(256), nn.ReLU(),
+ nn.Conv2D(
+ 256, 256, kernel_size=3, stride=1, padding=1, bias_attr=False),
+ BatchNorm(256),
+ nn.ReLU(),
nn.Sequential())
self._init_weight()
@@ -403,10 +407,11 @@ def forward(self, x, low_level_feat):
low_level_feat = self.bn1(low_level_feat)
low_level_feat = self.relu(low_level_feat)
- x = F.interpolate(x,
- size=low_level_feat.shape[2:],
- mode='bilinear',
- align_corners=True)
+ x = F.interpolate(
+ x,
+ size=low_level_feat.shape[2:],
+ mode='bilinear',
+ align_corners=True)
x = paddle.concat(x=[x, low_level_feat], axis=1)
x = self.last_conv(x)
@@ -423,6 +428,7 @@ def _init_weight(self):
class DeepLab(nn.Layer):
"""DeepLab model for segmentation"""
+
def __init__(self, backbone='resnet', output_stride=16, freeze_bn=True):
super(DeepLab, self).__init__()
@@ -432,10 +438,11 @@ def __init__(self, backbone='resnet', output_stride=16, freeze_bn=True):
else:
BatchNorm = nn.BatchNorm2D
- self.backbone = ResNet(Bottleneck, [3, 4, 23, 3],
- output_stride,
- BatchNorm,
- pretrained=True)
+ self.backbone = ResNet(
+ Bottleneck, [3, 4, 23, 3],
+ output_stride,
+ BatchNorm,
+ pretrained=True)
self.aspp = ASPP(backbone, output_stride, BatchNorm)
self.decoder = Decoder(backbone, BatchNorm)
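FrozenBatchNorm2D near the top of this deeplab.py diff only gains a blank line, but its contract is worth one equation: y = (x - mean) / sqrt(var + eps) * weight + bias, with all four tensors held constant. A minimal re-derivation under that assumption; the class name is illustrative:

import paddle
import paddle.nn as nn

class FrozenBN2D(nn.Layer):
    # Applies BatchNorm's affine transform with statistics frozen at
    # their loaded values; nothing here updates during training.
    def __init__(self, n, epsilon=1e-5):
        super().__init__()
        self.register_buffer('weight', paddle.ones([n]))
        self.register_buffer('bias', paddle.zeros([n]))
        self.register_buffer('running_mean', paddle.zeros([n]))
        self.register_buffer('running_var', paddle.ones([n]))
        self.epsilon = epsilon

    def forward(self, x):  # x: [N, C, H, W]
        scale = self.weight * (self.running_var + self.epsilon).rsqrt()
        shift = self.bias - self.running_mean * scale
        return x * scale.reshape([1, -1, 1, 1]) + shift.reshape([1, -1, 1, 1])

print(FrozenBN2D(8)(paddle.randn([2, 8, 4, 4])).shape)  # [2, 8, 4, 4]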
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet.py
old mode 100644
new mode 100755
index 2f07991a2..0ab570bcb
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet.py
@@ -42,6 +42,7 @@ class ConvBNLayer(nn.Layer):
    Note: weight and bias initialization assigns initial values and names to the restored parameters; value initialization is explicitly declared in the ```init_weights``` method.
"""
+
def __init__(self,
in_channels,
out_channels,
@@ -51,14 +52,15 @@ def __init__(self,
act=None,
name=None):
super(ConvBNLayer, self).__init__()
- self._conv = Conv2D(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=kernel_size,
- stride=stride,
- padding=(kernel_size - 1) // 2,
- groups=groups,
- weight_attr=ParamAttr(name=name + "_weights"),
- bias_attr=False)
+ self._conv = Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(name=name + "_weights"),
+ bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
@@ -66,10 +68,10 @@ def __init__(self,
self._act = act
- self._batch_norm = BatchNorm2D(out_channels,
- weight_attr=ParamAttr(name=bn_name +
- "_scale"),
- bias_attr=ParamAttr(bn_name + "_offset"))
+ self._batch_norm = BatchNorm2D(
+ out_channels,
+ weight_attr=ParamAttr(name=bn_name + "_scale"),
+ bias_attr=ParamAttr(bn_name + "_offset"))
def forward(self, inputs):
y = self._conv(inputs)
@@ -87,30 +89,34 @@ def __init__(self,
shortcut=True,
name=None):
super(BottleneckBlock, self).__init__()
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=1,
- act="relu",
- name=name + "_branch2a")
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=3,
- stride=stride,
- act="relu",
- name=name + "_branch2b")
-
- self.conv2 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels * 4,
- kernel_size=1,
- act=None,
- name=name + "_branch2c")
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ act="relu",
+ name=name + "_branch2a")
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=stride,
+ act="relu",
+ name=name + "_branch2b")
+
+ self.conv2 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels * 4,
+ kernel_size=1,
+ act=None,
+ name=name + "_branch2c")
if not shortcut:
- self.short = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels * 4,
- kernel_size=1,
- stride=stride,
- name=name + "_branch1")
+ self.short = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels * 4,
+ kernel_size=1,
+ stride=stride,
+ name=name + "_branch1")
self.shortcut = shortcut
@@ -135,24 +141,27 @@ def __init__(self,
name=None):
super(BasicBlock, self).__init__()
self.stride = stride
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- filter_size=3,
- stride=stride,
- act="relu",
- name=name + "_branch2a")
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- filter_size=3,
- act=None,
- name=name + "_branch2b")
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+            kernel_size=3,  # ConvBNLayer expects kernel_size; filter_size would raise a TypeError
+ stride=stride,
+ act="relu",
+ name=name + "_branch2a")
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+            kernel_size=3,
+ act=None,
+ name=name + "_branch2b")
if not shortcut:
- self.short = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- filter_size=1,
- stride=stride,
- name=name + "_branch1")
+ self.short = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+            kernel_size=1,
+ stride=stride,
+ name=name + "_branch1")
self.shortcut = shortcut
@@ -177,6 +186,7 @@ class ResNet(nn.Layer):
depth (int): Depth of resnet model.
pretrained (str): pretrained model. Default: None.
"""
+
def __init__(self, depth, pretrained=None):
super(ResNet, self).__init__()
self.pretrained = pretrained
@@ -199,12 +209,13 @@ def __init__(self, depth, pretrained=None):
in_channels = [64, 256, 512, 1024]
out_channels = [64, 128, 256, 512]
- self.conv = ConvBNLayer(in_channels=3,
- out_channels=64,
- kernel_size=7,
- stride=2,
- act="relu",
- name="conv1")
+ self.conv = ConvBNLayer(
+ in_channels=3,
+ out_channels=64,
+ kernel_size=7,
+ stride=2,
+ act="relu",
+ name="conv1")
self.pool2D_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
self.block_list = []
@@ -239,12 +250,13 @@ def __init__(self, depth, pretrained=None):
conv_name = "res" + str(block + 2) + chr(97 + i)
basic_block = self.add_sublayer(
conv_name,
- BasicBlock(in_channels=in_channels[block]
- if i == 0 else out_channels[block],
- out_channels=out_channels[block],
- stride=2 if i == 0 and block != 0 else 1,
- shortcut=shortcut,
- name=conv_name))
+ BasicBlock(
+ in_channels=in_channels[block]
+ if i == 0 else out_channels[block],
+ out_channels=out_channels[block],
+ stride=2 if i == 0 and block != 0 else 1,
+ shortcut=shortcut,
+ name=conv_name))
self.block_list.append(basic_block)
shortcut = True
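The ParamAttr names threaded through this resnet.py diff (name + "_branch2a", the bn_ prefix) pin checkpoint keys so Caffe-style pretrained weights load by name. A toy sketch of the convention; the layer name is illustrative:

import paddle.nn as nn
from paddle import ParamAttr

def named_conv(name, in_channels, out_channels, kernel_size):
    # The checkpoint is expected to store this kernel under f"{name}_weights".
    return nn.Conv2D(
        in_channels,
        out_channels,
        kernel_size,
        weight_attr=ParamAttr(name=name + "_weights"),
        bias_attr=False)

conv = named_conv("res2a_branch2a", 64, 64, 1)
print(conv.weight.name)  # res2a_branch2a_weights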
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_slowfast.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_slowfast.py
old mode 100644
new mode 100755
index a67915946..3ac9469ed
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_slowfast.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_slowfast.py
@@ -45,6 +45,7 @@ class BottleneckTransform(paddle.nn.Layer):
Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of
    the temporal kernel.
"""
+
def __init__(self,
dim_in,
dim_out,
@@ -99,10 +100,11 @@ def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups,
padding=[int(self.temp_kernel_size // 2), 0, 0],
weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
bias_attr=False)
- self.a_bn = self.norm_module(num_features=dim_inner,
- epsilon=self._eps,
- weight_attr=get_bn_param_attr(),
- bias_attr=get_bn_param_attr(bn_weight=0.0))
+ self.a_bn = self.norm_module(
+ num_features=dim_inner,
+ epsilon=self._eps,
+ weight_attr=get_bn_param_attr(),
+ bias_attr=get_bn_param_attr(bn_weight=0.0))
# 1x3x3, BN, ReLU.
fan = (dim_inner) * (1 * 3 * 3)
@@ -118,10 +120,11 @@ def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups,
dilation=[1, dilation, dilation],
weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
bias_attr=False)
- self.b_bn = self.norm_module(num_features=dim_inner,
- epsilon=self._eps,
- weight_attr=get_bn_param_attr(),
- bias_attr=get_bn_param_attr(bn_weight=0.0))
+ self.b_bn = self.norm_module(
+ num_features=dim_inner,
+ epsilon=self._eps,
+ weight_attr=get_bn_param_attr(),
+ bias_attr=get_bn_param_attr(bn_weight=0.0))
# 1x1x1, BN.
fan = (dim_out) * (1 * 1 * 1)
@@ -162,6 +165,7 @@ class ResBlock(paddle.nn.Layer):
"""
Residual block.
"""
+
def __init__(self,
dim_in,
dim_out,
@@ -211,21 +215,19 @@ def __init__(self,
num_groups,
stride_1x1,
inplace_relu,
- dilation,
- )
+ dilation, )
def _construct(
- self,
- dim_in,
- dim_out,
- temp_kernel_size,
- stride,
- dim_inner,
- num_groups,
- stride_1x1,
- inplace_relu,
- dilation,
- ):
+ self,
+ dim_in,
+ dim_out,
+ temp_kernel_size,
+ stride,
+ dim_inner,
+ num_groups,
+ stride_1x1,
+ inplace_relu,
+ dilation, ):
# Use skip connection with projection if dim or res change.
if (dim_in != dim_out) or (stride != 1):
fan = (dim_out) * (1 * 1 * 1)
@@ -245,16 +247,17 @@ def _construct(
weight_attr=get_bn_param_attr(),
bias_attr=get_bn_param_attr(bn_weight=0.0))
- self.branch2 = BottleneckTransform(dim_in,
- dim_out,
- temp_kernel_size,
- stride,
- dim_inner,
- num_groups,
- stride_1x1=stride_1x1,
- inplace_relu=inplace_relu,
- dilation=dilation,
- norm_module=self.norm_module)
+ self.branch2 = BottleneckTransform(
+ dim_in,
+ dim_out,
+ temp_kernel_size,
+ stride,
+ dim_inner,
+ num_groups,
+ stride_1x1=stride_1x1,
+ inplace_relu=inplace_relu,
+ dilation=dilation,
+ norm_module=self.norm_module)
def forward(self, x):
if hasattr(self, "branch1"):
@@ -279,6 +282,7 @@ class ResStage(paddle.nn.Layer):
"Slowfast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
+
def __init__(self,
dim_in,
dim_out,
@@ -324,11 +328,10 @@ def __init__(self,
assert all((num_block_temp_kernel[i] <= num_blocks[i]
for i in range(len(temp_kernel_sizes))))
self.num_blocks = num_blocks
- self.temp_kernel_sizes = [
- (temp_kernel_sizes[i] * num_blocks[i])[:num_block_temp_kernel[i]] +
- [1] * (num_blocks[i] - num_block_temp_kernel[i])
- for i in range(len(temp_kernel_sizes))
- ]
+ self.temp_kernel_sizes = [(temp_kernel_sizes[i] * num_blocks[i]
+ )[:num_block_temp_kernel[i]] + [1] *
+ (num_blocks[i] - num_block_temp_kernel[i])
+ for i in range(len(temp_kernel_sizes))]
assert (len({
len(dim_in),
len(dim_out),
@@ -349,20 +352,18 @@ def __init__(self,
num_groups,
stride_1x1,
inplace_relu,
- dilation,
- )
+ dilation, )
def _construct(
- self,
- dim_in,
- dim_out,
- stride,
- dim_inner,
- num_groups,
- stride_1x1,
- inplace_relu,
- dilation,
- ):
+ self,
+ dim_in,
+ dim_out,
+ stride,
+ dim_inner,
+ num_groups,
+ stride_1x1,
+ inplace_relu,
+ dilation, ):
for pathway in range(self.num_pathways):
for i in range(self.num_blocks[pathway]):
@@ -399,6 +400,7 @@ class ResNetBasicStem(paddle.nn.Layer):
    Performs spatiotemporal convolution, BN, and ReLU, followed by a
spatiotemporal pooling.
"""
+
def __init__(self,
dim_in,
dim_out,
@@ -427,21 +429,23 @@ def _construct_stem(self, dim_in, dim_out):
padding=self.padding,
weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
bias_attr=False)
- self._bn = self.norm_module(num_features=dim_out,
- epsilon=self.eps,
- weight_attr=get_bn_param_attr(),
- bias_attr=get_bn_param_attr(bn_weight=0.0))
+ self._bn = self.norm_module(
+ num_features=dim_out,
+ epsilon=self.eps,
+ weight_attr=get_bn_param_attr(),
+ bias_attr=get_bn_param_attr(bn_weight=0.0))
def forward(self, x):
x = self._conv(x)
x = self._bn(x)
x = F.relu(x)
- x = F.max_pool3d(x=x,
- kernel_size=[1, 3, 3],
- stride=[1, 2, 2],
- padding=[0, 1, 1],
- data_format="NCDHW")
+ x = F.max_pool3d(
+ x=x,
+ kernel_size=[1, 3, 3],
+ stride=[1, 2, 2],
+ padding=[0, 1, 1],
+ data_format="NCDHW")
return x
@@ -450,6 +454,7 @@ class VideoModelStem(paddle.nn.Layer):
Video 3D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool
on input data tensor for slow and fast pathways.
"""
+
def __init__(self,
dim_in,
dim_out,
@@ -500,9 +505,9 @@ def _construct_stem(self, dim_in, dim_out):
self.add_sublayer("pathway{}_stem".format(pathway), stem)
def forward(self, x):
- assert (len(x) == self.num_pathways
- ), "Input tensor does not contain {} pathway".format(
- self.num_pathways)
+ assert (
+ len(x) == self.num_pathways
+ ), "Input tensor does not contain {} pathway".format(self.num_pathways)
for pathway in range(len(x)):
m = getattr(self, "pathway{}_stem".format(pathway))
@@ -517,6 +522,7 @@ class FuseFastToSlow(paddle.nn.Layer):
tensors from Slow pathway and Fast pathway, fuse information from Fast to
Slow, then return the fused tensors from Slow and Fast pathway in order.
"""
+
def __init__(self,
dim_in,
fusion_conv_channel_ratio,
@@ -548,10 +554,11 @@ def __init__(self,
padding=[fusion_kernel // 2, 0, 0],
weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
bias_attr=False)
- self._bn = norm_module(num_features=dim_in * fusion_conv_channel_ratio,
- epsilon=eps,
- weight_attr=get_bn_param_attr(),
- bias_attr=get_bn_param_attr(bn_weight=0.0))
+ self._bn = norm_module(
+ num_features=dim_in * fusion_conv_channel_ratio,
+ epsilon=eps,
+ weight_attr=get_bn_param_attr(),
+ bias_attr=get_bn_param_attr(bn_weight=0.0))
def forward(self, x):
x_s = x[0]
@@ -575,24 +582,24 @@ class ResNetSlowFast(paddle.nn.Layer):
"Slowfast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
+
def __init__(
- self,
- alpha,
- beta,
- bn_norm_type="batchnorm",
- bn_num_splits=1,
- num_pathways=2,
- depth=50,
- num_groups=1,
- input_channel_num=[3, 3],
- width_per_group=64,
- fusion_conv_channel_ratio=2,
- fusion_kernel_sz=7, #5?
- pool_size_ratio=[[1, 1, 1], [1, 1, 1]],
- fuse_bn_relu = 1,
- spatial_strides = [[1, 1], [2, 2], [2, 2], [2, 2]],
- use_pool_af_s2 = 1,
- ):
+ self,
+ alpha,
+ beta,
+ bn_norm_type="batchnorm",
+ bn_num_splits=1,
+ num_pathways=2,
+ depth=50,
+ num_groups=1,
+ input_channel_num=[3, 3],
+ width_per_group=64,
+ fusion_conv_channel_ratio=2,
+            fusion_kernel_sz=7,  # TODO: confirm whether this should be 5
+ pool_size_ratio=[[1, 1, 1], [1, 1, 1]],
+ fuse_bn_relu=1,
+ spatial_strides=[[1, 1], [2, 2], [2, 2], [2, 2]],
+ use_pool_af_s2=1, ):
"""
Args:
cfg (CfgNode): model building configs, details are in the
@@ -665,22 +672,23 @@ def _construct_network(self):
out_dim_ratio = self.beta // self.fusion_conv_channel_ratio #4
dim_inner = self.width_per_group * self.num_groups #64
- self.s2 = ResStage(dim_in=[
- self.width_per_group + self.width_per_group // out_dim_ratio,
- self.width_per_group // self.beta,
- ],
- dim_out=[
- self.width_per_group * 4,
- self.width_per_group * 4 // self.beta,
- ],
- dim_inner=[dim_inner, dim_inner // self.beta],
- temp_kernel_sizes=temp_kernel[1],
- stride=spatial_strides[0],
- num_blocks=[d2] * 2,
- num_groups=[self.num_groups] * 2,
- num_block_temp_kernel=num_block_temp_kernel[0],
- dilation=spatial_dilations[0],
- norm_module=self.norm_module)
+ self.s2 = ResStage(
+ dim_in=[
+ self.width_per_group + self.width_per_group // out_dim_ratio,
+ self.width_per_group // self.beta,
+ ],
+ dim_out=[
+ self.width_per_group * 4,
+ self.width_per_group * 4 // self.beta,
+ ],
+ dim_inner=[dim_inner, dim_inner // self.beta],
+ temp_kernel_sizes=temp_kernel[1],
+ stride=spatial_strides[0],
+ num_blocks=[d2] * 2,
+ num_groups=[self.num_groups] * 2,
+ num_block_temp_kernel=num_block_temp_kernel[0],
+ dilation=spatial_dilations[0],
+ norm_module=self.norm_module)
self.s2_fuse = FuseFastToSlow(
dim_in=self.width_per_group * 4 // self.beta,
@@ -688,13 +696,12 @@ def _construct_network(self):
fusion_kernel=self.fusion_kernel_sz,
alpha=self.alpha,
norm_module=self.norm_module,
- fuse_bn_relu=self.fuse_bn_relu,
- )
+ fuse_bn_relu=self.fuse_bn_relu, )
self.s3 = ResStage(
dim_in=[
- self.width_per_group * 4 +
- self.width_per_group * 4 // out_dim_ratio,
+ self.width_per_group * 4 + self.width_per_group * 4 //
+ out_dim_ratio,
self.width_per_group * 4 // self.beta,
],
dim_out=[
@@ -708,8 +715,7 @@ def _construct_network(self):
num_groups=[self.num_groups] * 2,
num_block_temp_kernel=num_block_temp_kernel[1],
dilation=spatial_dilations[1],
- norm_module=self.norm_module,
- )
+ norm_module=self.norm_module, )
self.s3_fuse = FuseFastToSlow(
dim_in=self.width_per_group * 8 // self.beta,
@@ -717,13 +723,12 @@ def _construct_network(self):
fusion_kernel=self.fusion_kernel_sz,
alpha=self.alpha,
norm_module=self.norm_module,
- fuse_bn_relu=self.fuse_bn_relu,
- )
+ fuse_bn_relu=self.fuse_bn_relu, )
self.s4 = ResStage(
dim_in=[
- self.width_per_group * 8 +
- self.width_per_group * 8 // out_dim_ratio,
+ self.width_per_group * 8 + self.width_per_group * 8 //
+ out_dim_ratio,
self.width_per_group * 8 // self.beta,
],
dim_out=[
@@ -737,8 +742,7 @@ def _construct_network(self):
num_groups=[self.num_groups] * 2,
num_block_temp_kernel=num_block_temp_kernel[2],
dilation=spatial_dilations[2],
- norm_module=self.norm_module,
- )
+ norm_module=self.norm_module, )
self.s4_fuse = FuseFastToSlow(
dim_in=self.width_per_group * 16 // self.beta,
@@ -746,13 +750,12 @@ def _construct_network(self):
fusion_kernel=self.fusion_kernel_sz,
alpha=self.alpha,
norm_module=self.norm_module,
- fuse_bn_relu=self.fuse_bn_relu,
- )
+ fuse_bn_relu=self.fuse_bn_relu, )
self.s5 = ResStage(
dim_in=[
- self.width_per_group * 16 +
- self.width_per_group * 16 // out_dim_ratio,
+ self.width_per_group * 16 + self.width_per_group * 16 //
+ out_dim_ratio,
self.width_per_group * 16 // self.beta,
],
dim_out=[
@@ -766,8 +769,7 @@ def _construct_network(self):
num_groups=[self.num_groups] * 2,
num_block_temp_kernel=num_block_temp_kernel[3],
dilation=spatial_dilations[3],
- norm_module=self.norm_module,
- )
+ norm_module=self.norm_module, )
def init_weights(self):
pass
@@ -781,11 +783,12 @@ def forward(self, x):
        # TODO: For AVA, set use_pool_af_s2=1 and check whether mAP improves.
if self.use_pool_af_s2:
for pathway in range(self.num_pathways):
- x[pathway] = F.max_pool3d(x=x[pathway],
- kernel_size=self.pool_size_ratio[pathway],
- stride=self.pool_size_ratio[pathway],
- padding=[0, 0, 0],
- data_format="NCDHW")
+ x[pathway] = F.max_pool3d(
+ x=x[pathway],
+ kernel_size=self.pool_size_ratio[pathway],
+ stride=self.pool_size_ratio[pathway],
+ padding=[0, 0, 0],
+ data_format="NCDHW")
x = self.s3(x)
x = self.s3_fuse(x)
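The reflowed temp_kernel_sizes comprehension in ResStage is the hardest hunk here to read; it repeats each pathway's temporal-kernel pattern across its blocks, then pads the tail with 1s. An equivalent unpacked sketch (pure Python, same arithmetic):

def pad_temp_kernels(temp_kernel_sizes, num_blocks, num_block_temp_kernel):
    out = []
    for i in range(len(temp_kernel_sizes)):
        # Tile the pathway's kernel pattern over its blocks and keep the
        # first num_block_temp_kernel[i] entries...
        ks = (temp_kernel_sizes[i] * num_blocks[i])[:num_block_temp_kernel[i]]
        # ...then give every remaining block a temporal kernel of 1.
        ks += [1] * (num_blocks[i] - num_block_temp_kernel[i])
        out.append(ks)
    return out

print(pad_temp_kernels([[3], [1]], [4, 4], [2, 4]))
# [[3, 3, 1, 1], [1, 1, 1, 1]]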
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_slowfast_MRI.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_slowfast_MRI.py
old mode 100644
new mode 100755
index d348d45cf..eabfef3dd
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_slowfast_MRI.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_slowfast_MRI.py
@@ -45,6 +45,7 @@ class BottleneckTransform(paddle.nn.Layer):
Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of
    the temporal kernel.
"""
+
def __init__(self,
dim_in,
dim_out,
@@ -99,10 +100,11 @@ def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups,
padding=[int(self.temp_kernel_size // 2), 0, 0],
weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
bias_attr=False)
- self.a_bn = self.norm_module(num_features=dim_inner,
- epsilon=self._eps,
- weight_attr=get_bn_param_attr(),
- bias_attr=get_bn_param_attr(bn_weight=0.0))
+ self.a_bn = self.norm_module(
+ num_features=dim_inner,
+ epsilon=self._eps,
+ weight_attr=get_bn_param_attr(),
+ bias_attr=get_bn_param_attr(bn_weight=0.0))
# 1x3x3, BN, ReLU.
fan = (dim_inner) * (1 * 3 * 3)
@@ -118,10 +120,11 @@ def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups,
dilation=[1, dilation, dilation],
weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
bias_attr=False)
- self.b_bn = self.norm_module(num_features=dim_inner,
- epsilon=self._eps,
- weight_attr=get_bn_param_attr(),
- bias_attr=get_bn_param_attr(bn_weight=0.0))
+ self.b_bn = self.norm_module(
+ num_features=dim_inner,
+ epsilon=self._eps,
+ weight_attr=get_bn_param_attr(),
+ bias_attr=get_bn_param_attr(bn_weight=0.0))
# 1x1x1, BN.
fan = (dim_out) * (1 * 1 * 1)
@@ -162,6 +165,7 @@ class ResBlock(paddle.nn.Layer):
"""
Residual block.
"""
+
def __init__(self,
dim_in,
dim_out,
@@ -211,21 +215,19 @@ def __init__(self,
num_groups,
stride_1x1,
inplace_relu,
- dilation,
- )
+ dilation, )
def _construct(
- self,
- dim_in,
- dim_out,
- temp_kernel_size,
- stride,
- dim_inner,
- num_groups,
- stride_1x1,
- inplace_relu,
- dilation,
- ):
+ self,
+ dim_in,
+ dim_out,
+ temp_kernel_size,
+ stride,
+ dim_inner,
+ num_groups,
+ stride_1x1,
+ inplace_relu,
+ dilation, ):
# Use skip connection with projection if dim or res change.
if (dim_in != dim_out) or (stride != 1):
fan = (dim_out) * (1 * 1 * 1)
@@ -245,16 +247,17 @@ def _construct(
weight_attr=get_bn_param_attr(),
bias_attr=get_bn_param_attr(bn_weight=0.0))
- self.branch2 = BottleneckTransform(dim_in,
- dim_out,
- temp_kernel_size,
- stride,
- dim_inner,
- num_groups,
- stride_1x1=stride_1x1,
- inplace_relu=inplace_relu,
- dilation=dilation,
- norm_module=self.norm_module)
+ self.branch2 = BottleneckTransform(
+ dim_in,
+ dim_out,
+ temp_kernel_size,
+ stride,
+ dim_inner,
+ num_groups,
+ stride_1x1=stride_1x1,
+ inplace_relu=inplace_relu,
+ dilation=dilation,
+ norm_module=self.norm_module)
def forward(self, x):
if hasattr(self, "branch1"):
@@ -279,6 +282,7 @@ class ResStage(paddle.nn.Layer):
"Slowfast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
+
def __init__(self,
dim_in,
dim_out,
@@ -324,11 +328,10 @@ def __init__(self,
assert all((num_block_temp_kernel[i] <= num_blocks[i]
for i in range(len(temp_kernel_sizes))))
self.num_blocks = num_blocks
- self.temp_kernel_sizes = [
- (temp_kernel_sizes[i] * num_blocks[i])[:num_block_temp_kernel[i]] +
- [1] * (num_blocks[i] - num_block_temp_kernel[i])
- for i in range(len(temp_kernel_sizes))
- ]
+ self.temp_kernel_sizes = [(temp_kernel_sizes[i] * num_blocks[i]
+ )[:num_block_temp_kernel[i]] + [1] *
+ (num_blocks[i] - num_block_temp_kernel[i])
+ for i in range(len(temp_kernel_sizes))]
assert (len({
len(dim_in),
len(dim_out),
@@ -349,20 +352,18 @@ def __init__(self,
num_groups,
stride_1x1,
inplace_relu,
- dilation,
- )
+ dilation, )
def _construct(
- self,
- dim_in,
- dim_out,
- stride,
- dim_inner,
- num_groups,
- stride_1x1,
- inplace_relu,
- dilation,
- ):
+ self,
+ dim_in,
+ dim_out,
+ stride,
+ dim_inner,
+ num_groups,
+ stride_1x1,
+ inplace_relu,
+ dilation, ):
for pathway in range(self.num_pathways):
for i in range(self.num_blocks[pathway]):
@@ -399,6 +400,7 @@ class ResNetBasicStem(paddle.nn.Layer):
Performs spatiotemporal convolution, BN, and ReLU, followed by a
spatiotemporal pooling.
"""
+
def __init__(self,
dim_in,
dim_out,
@@ -427,21 +429,23 @@ def _construct_stem(self, dim_in, dim_out):
padding=self.padding,
weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
bias_attr=False)
- self._bn = self.norm_module(num_features=dim_out,
- epsilon=self.eps,
- weight_attr=get_bn_param_attr(),
- bias_attr=get_bn_param_attr(bn_weight=0.0))
+ self._bn = self.norm_module(
+ num_features=dim_out,
+ epsilon=self.eps,
+ weight_attr=get_bn_param_attr(),
+ bias_attr=get_bn_param_attr(bn_weight=0.0))
def forward(self, x):
x = self._conv(x)
x = self._bn(x)
x = F.relu(x)
- x = F.max_pool3d(x=x,
- kernel_size=[1, 3, 3],
- stride=[1, 2, 2],
- padding=[0, 1, 1],
- data_format="NCDHW")
+ x = F.max_pool3d(
+ x=x,
+ kernel_size=[1, 3, 3],
+ stride=[1, 2, 2],
+ padding=[0, 1, 1],
+ data_format="NCDHW")
return x
@@ -450,6 +454,7 @@ class VideoModelStem(paddle.nn.Layer):
Video 3D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool
on input data tensor for slow and fast pathways.
"""
+
def __init__(self,
dim_in,
dim_out,
@@ -500,9 +505,9 @@ def _construct_stem(self, dim_in, dim_out):
self.add_sublayer("pathway{}_stem".format(pathway), stem)
def forward(self, x):
- assert (len(x) == self.num_pathways
- ), "Input tensor does not contain {} pathway".format(
- self.num_pathways)
+ assert (
+ len(x) == self.num_pathways
+ ), "Input tensor does not contain {} pathway".format(self.num_pathways)
for pathway in range(len(x)):
m = getattr(self, "pathway{}_stem".format(pathway))
@@ -517,6 +522,7 @@ class FuseFastToSlow(paddle.nn.Layer):
tensors from Slow pathway and Fast pathway, fuse information from Fast to
Slow, then return the fused tensors from Slow and Fast pathway in order.
"""
+
def __init__(self,
dim_in,
fusion_conv_channel_ratio,
@@ -548,10 +554,11 @@ def __init__(self,
padding=[fusion_kernel // 2, 0, 0],
weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
bias_attr=False)
- self._bn = norm_module(num_features=dim_in * fusion_conv_channel_ratio,
- epsilon=eps,
- weight_attr=get_bn_param_attr(),
- bias_attr=get_bn_param_attr(bn_weight=0.0))
+ self._bn = norm_module(
+ num_features=dim_in * fusion_conv_channel_ratio,
+ epsilon=eps,
+ weight_attr=get_bn_param_attr(),
+ bias_attr=get_bn_param_attr(bn_weight=0.0))
def forward(self, x):
x_s = x[0]
@@ -575,24 +582,24 @@ class ResNetSlowFast_MRI(paddle.nn.Layer):
"Slowfast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
+
def __init__(
- self,
- alpha,
- beta,
- bn_norm_type="batchnorm",
- bn_num_splits=1,
- num_pathways=2,
- depth=50,
- num_groups=1,
- input_channel_num=[1, 1],
- width_per_group=64,
- fusion_conv_channel_ratio=2,
- fusion_kernel_sz=7, #5?
- pool_size_ratio=[[1, 1, 1], [1, 1, 1]],
- fuse_bn_relu=1,
- spatial_strides=[[1, 1], [2, 2], [2, 2], [2, 2]],
- use_pool_af_s2=1,
- ):
+ self,
+ alpha,
+ beta,
+ bn_norm_type="batchnorm",
+ bn_num_splits=1,
+ num_pathways=2,
+ depth=50,
+ num_groups=1,
+ input_channel_num=[1, 1],
+ width_per_group=64,
+ fusion_conv_channel_ratio=2,
+ fusion_kernel_sz=7, #5?
+ pool_size_ratio=[[1, 1, 1], [1, 1, 1]],
+ fuse_bn_relu=1,
+ spatial_strides=[[1, 1], [2, 2], [2, 2], [2, 2]],
+ use_pool_af_s2=1, ):
"""
Args:
cfg (CfgNode): model building configs, details are in the
@@ -665,22 +672,23 @@ def _construct_network(self):
out_dim_ratio = self.beta // self.fusion_conv_channel_ratio #4
dim_inner = self.width_per_group * self.num_groups #64
- self.s2 = ResStage(dim_in=[
- self.width_per_group + self.width_per_group // out_dim_ratio,
- self.width_per_group // self.beta,
- ],
- dim_out=[
- self.width_per_group * 4,
- self.width_per_group * 4 // self.beta,
- ],
- dim_inner=[dim_inner, dim_inner // self.beta],
- temp_kernel_sizes=temp_kernel[1],
- stride=spatial_strides[0],
- num_blocks=[d2] * 2,
- num_groups=[self.num_groups] * 2,
- num_block_temp_kernel=num_block_temp_kernel[0],
- dilation=spatial_dilations[0],
- norm_module=self.norm_module)
+ self.s2 = ResStage(
+ dim_in=[
+ self.width_per_group + self.width_per_group // out_dim_ratio,
+ self.width_per_group // self.beta,
+ ],
+ dim_out=[
+ self.width_per_group * 4,
+ self.width_per_group * 4 // self.beta,
+ ],
+ dim_inner=[dim_inner, dim_inner // self.beta],
+ temp_kernel_sizes=temp_kernel[1],
+ stride=spatial_strides[0],
+ num_blocks=[d2] * 2,
+ num_groups=[self.num_groups] * 2,
+ num_block_temp_kernel=num_block_temp_kernel[0],
+ dilation=spatial_dilations[0],
+ norm_module=self.norm_module)
self.s2_fuse = FuseFastToSlow(
dim_in=self.width_per_group * 4 // self.beta,
@@ -688,13 +696,12 @@ def _construct_network(self):
fusion_kernel=self.fusion_kernel_sz,
alpha=self.alpha,
norm_module=self.norm_module,
- fuse_bn_relu=self.fuse_bn_relu,
- )
+ fuse_bn_relu=self.fuse_bn_relu, )
self.s3 = ResStage(
dim_in=[
- self.width_per_group * 4 +
- self.width_per_group * 4 // out_dim_ratio,
+ self.width_per_group * 4 + self.width_per_group * 4 //
+ out_dim_ratio,
self.width_per_group * 4 // self.beta,
],
dim_out=[
@@ -708,8 +715,7 @@ def _construct_network(self):
num_groups=[self.num_groups] * 2,
num_block_temp_kernel=num_block_temp_kernel[1],
dilation=spatial_dilations[1],
- norm_module=self.norm_module,
- )
+ norm_module=self.norm_module, )
self.s3_fuse = FuseFastToSlow(
dim_in=self.width_per_group * 8 // self.beta,
@@ -717,13 +723,12 @@ def _construct_network(self):
fusion_kernel=self.fusion_kernel_sz,
alpha=self.alpha,
norm_module=self.norm_module,
- fuse_bn_relu=self.fuse_bn_relu,
- )
+ fuse_bn_relu=self.fuse_bn_relu, )
self.s4 = ResStage(
dim_in=[
- self.width_per_group * 8 +
- self.width_per_group * 8 // out_dim_ratio,
+ self.width_per_group * 8 + self.width_per_group * 8 //
+ out_dim_ratio,
self.width_per_group * 8 // self.beta,
],
dim_out=[
@@ -737,8 +742,7 @@ def _construct_network(self):
num_groups=[self.num_groups] * 2,
num_block_temp_kernel=num_block_temp_kernel[2],
dilation=spatial_dilations[2],
- norm_module=self.norm_module,
- )
+ norm_module=self.norm_module, )
self.s4_fuse = FuseFastToSlow(
dim_in=self.width_per_group * 16 // self.beta,
@@ -746,13 +750,12 @@ def _construct_network(self):
fusion_kernel=self.fusion_kernel_sz,
alpha=self.alpha,
norm_module=self.norm_module,
- fuse_bn_relu=self.fuse_bn_relu,
- )
+ fuse_bn_relu=self.fuse_bn_relu, )
self.s5 = ResStage(
dim_in=[
- self.width_per_group * 16 +
- self.width_per_group * 16 // out_dim_ratio,
+ self.width_per_group * 16 + self.width_per_group * 16 //
+ out_dim_ratio,
self.width_per_group * 16 // self.beta,
],
dim_out=[
@@ -766,8 +769,7 @@ def _construct_network(self):
num_groups=[self.num_groups] * 2,
num_block_temp_kernel=num_block_temp_kernel[3],
dilation=spatial_dilations[3],
- norm_module=self.norm_module,
- )
+ norm_module=self.norm_module, )
def init_weights(self):
pass
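# Hedged sketch of the "Tx1x1, 1x3x3, 1x1x1" bottleneck described in the
# BottleneckTransform docstring above. Layer names, channel counts, and the
# input shape are assumptions for illustration only.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

T = 3                                              # temporal kernel size
dim_in, dim_inner, dim_out = 64, 16, 256
x = paddle.randn([2, dim_in, 8, 28, 28])           # [N, C, T, H, W]

a = nn.Conv3D(dim_in, dim_inner, [T, 1, 1], padding=[T // 2, 0, 0])  # Tx1x1
b = nn.Conv3D(dim_inner, dim_inner, [1, 3, 3], padding=[0, 1, 1])    # 1x3x3
c = nn.Conv3D(dim_inner, dim_out, [1, 1, 1])                         # 1x1x1

y = c(F.relu(b(F.relu(a(x)))))
print(y.shape)                                     # [2, 256, 8, 28, 28]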
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsm.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsm.py
old mode 100644
new mode 100755
index 9fa5093e8..6f4b1a6e9
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsm.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsm.py
@@ -38,6 +38,7 @@ class ConvBNLayer(nn.Layer):
Note: weight and bias initialization both set initial values and name the restored parameters; value initialization is explicitly declared in the ```init_weights``` method.
"""
+
def __init__(self,
in_channels,
out_channels,
@@ -48,15 +49,16 @@ def __init__(self,
name=None,
data_format="NCHW"):
super(ConvBNLayer, self).__init__()
- self._conv = Conv2D(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=kernel_size,
- stride=stride,
- padding=(kernel_size - 1) // 2,
- groups=groups,
- weight_attr=ParamAttr(name=name + "_weights"),
- bias_attr=False,
- data_format=data_format)
+ self._conv = Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(name=name + "_weights"),
+ bias_attr=False,
+ data_format=data_format)
if name == "conv1":
bn_name = "bn_" + name
else:
@@ -66,10 +68,10 @@ def __init__(self,
self._batch_norm = BatchNorm2D(
out_channels,
- weight_attr=ParamAttr(name=bn_name + "_scale",
- regularizer=L2Decay(0.0)),
- bias_attr=ParamAttr(name=bn_name + "_offset",
- regularizer=L2Decay(0.0)),
+ weight_attr=ParamAttr(
+ name=bn_name + "_scale", regularizer=L2Decay(0.0)),
+ bias_attr=ParamAttr(
+ name=bn_name + "_offset", regularizer=L2Decay(0.0)),
data_format=data_format)
def forward(self, inputs):
@@ -91,34 +93,38 @@ def __init__(self,
data_format="NCHW"):
super(BottleneckBlock, self).__init__()
self.data_format = data_format
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=1,
- act="relu",
- name=name + "_branch2a",
- data_format=data_format)
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=3,
- stride=stride,
- act="relu",
- name=name + "_branch2b",
- data_format=data_format)
-
- self.conv2 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels * 4,
- kernel_size=1,
- act=None,
- name=name + "_branch2c",
- data_format=data_format)
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ act="relu",
+ name=name + "_branch2a",
+ data_format=data_format)
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=stride,
+ act="relu",
+ name=name + "_branch2b",
+ data_format=data_format)
+
+ self.conv2 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels * 4,
+ kernel_size=1,
+ act=None,
+ name=name + "_branch2c",
+ data_format=data_format)
if not shortcut:
- self.short = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels * 4,
- kernel_size=1,
- stride=stride,
- name=name + "_branch1",
- data_format=data_format)
+ self.short = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels * 4,
+ kernel_size=1,
+ stride=stride,
+ name=name + "_branch1",
+ data_format=data_format)
self.shortcut = shortcut
self.num_seg = num_seg
@@ -129,22 +135,36 @@ def forward(self, inputs):
seg_num = self.num_seg
shift_ratio = 1.0 / self.num_seg
- shape = x.shape #[N*T, C, H, W]
- reshape_x = x.reshape((-1, seg_num, shape[1], shape[2], shape[3])) #[N, T, C, H, W]
- pad_x = paddle.fluid.layers.pad(reshape_x, [0,0,1,1,0,0,0,0,0,0,]) #[N, T+2, C, H, W]
+ shape = x.shape #[N*T, C, H, W]
+ reshape_x = x.reshape(
+ (-1, seg_num, shape[1], shape[2], shape[3])) #[N, T, C, H, W]
+ pad_x = paddle.fluid.layers.pad(reshape_x, [
+ 0,
+ 0,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ ]) #[N, T+2, C, H, W]
c1 = int(shape[1] * shift_ratio)
c2 = int(shape[1] * 2 * shift_ratio)
slice1 = pad_x[:, :seg_num, :c1, :, :]
- slice2 = pad_x[:, 2:seg_num+2, c1:c2, :, :]
- slice3 = pad_x[:, 1:seg_num+1, c2:, :, :]
- concat_x = paddle.concat([slice1, slice2, slice3], axis=2) #[N, T, C, H, W]
+ slice2 = pad_x[:, 2:seg_num + 2, c1:c2, :, :]
+ slice3 = pad_x[:, 1:seg_num + 1, c2:, :, :]
+ concat_x = paddle.concat(
+ [slice1, slice2, slice3], axis=2) #[N, T, C, H, W]
shifts = concat_x.reshape(shape)
else:
- shifts = F.temporal_shift(inputs,
- self.num_seg,
- 1.0 / self.num_seg,
- data_format=self.data_format)
-
+ shifts = F.temporal_shift(
+ inputs,
+ self.num_seg,
+ 1.0 / self.num_seg,
+ data_format=self.data_format)
+
y = self.conv0(shifts)
conv1 = self.conv1(y)
conv2 = self.conv2(conv1)
@@ -173,16 +193,14 @@ def __init__(self,
stride=stride,
act="relu",
name=name + "_branch2a",
- data_format=data_format,
- )
+ data_format=data_format, )
self.conv1 = ConvBNLayer(
in_channels=out_channels,
out_channels=out_channels,
filter_size=3,
act=None,
name=name + "_branch2b",
- data_format=data_format,
- )
+ data_format=data_format, )
if not shortcut:
self.short = ConvBNLayer(
@@ -191,8 +209,7 @@ def __init__(self,
filter_size=1,
stride=stride,
name=name + "_branch1",
- data_format=data_format,
- )
+ data_format=data_format, )
self.shortcut = shortcut
@@ -217,6 +234,7 @@ class ResNetTSM(nn.Layer):
depth (int): Depth of resnet model.
pretrained (str): pretrained model. Default: None.
"""
+
def __init__(self, depth, num_seg=8, data_format="NCHW", pretrained=None):
super(ResNetTSM, self).__init__()
self.pretrained = pretrained
@@ -241,19 +259,19 @@ def __init__(self, depth, num_seg=8, data_format="NCHW", pretrained=None):
in_channels = 64
out_channels = [64, 128, 256, 512]
- self.conv = ConvBNLayer(in_channels=3,
- out_channels=64,
- kernel_size=7,
- stride=2,
- act="relu",
- name="conv1",
- data_format=self.data_format)
+ self.conv = ConvBNLayer(
+ in_channels=3,
+ out_channels=64,
+ kernel_size=7,
+ stride=2,
+ act="relu",
+ name="conv1",
+ data_format=self.data_format)
self.pool2D_max = MaxPool2D(
kernel_size=3,
stride=2,
padding=1,
- data_format=self.data_format,
- )
+ data_format=self.data_format, )
self.block_list = []
if self.layers >= 50:
@@ -295,8 +313,7 @@ def __init__(self, depth, num_seg=8, data_format="NCHW", pretrained=None):
stride=2 if i == 0 and block != 0 else 1,
shortcut=shortcut,
name=conv_name,
- data_format=self.data_format,
- ))
+ data_format=self.data_format, ))
self.block_list.append(basic_block)
shortcut = True
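# A small sketch of the temporal shift that BottleneckBlock.forward above
# implements two ways (manual pad/slice vs. F.temporal_shift). The manual
# branch below mirrors the diff's slicing; sizes are illustrative assumptions.
import paddle
import paddle.nn.functional as F

N, T, C, H, W = 2, 4, 8, 3, 3
x = paddle.randn([N * T, C, H, W])                 # [N*T, C, H, W] as in forward()

xt = x.reshape([N, T, C, H, W])
zeros = paddle.zeros([N, 1, C, H, W])
pad_x = paddle.concat([zeros, xt, zeros], axis=1)  # [N, T+2, C, H, W]

c1, c2 = C // T, 2 * (C // T)                      # fold sizes for shift_ratio=1/T
slice1 = pad_x[:, :T, :c1]                         # delayed one frame (zero at t=0)
slice2 = pad_x[:, 2:T + 2, c1:c2]                  # advanced one frame
slice3 = pad_x[:, 1:T + 1, c2:]                    # unchanged channels
manual = paddle.concat([slice1, slice2, slice3], axis=2).reshape([N * T, C, H, W])

builtin = F.temporal_shift(x, seg_num=T, shift_ratio=1.0 / T)
print(bool(paddle.allclose(manual, builtin)))      # True if both branches agree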
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsm_MRI.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsm_MRI.py
old mode 100644
new mode 100755
index ae4fdc70f..9466dbea5
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsm_MRI.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsm_MRI.py
@@ -45,6 +45,7 @@ class ConvBNLayer(nn.Layer):
Note: weight and bias initialization both set initial values and name the restored parameters; value initialization is explicitly declared in the ```init_weights``` method.
"""
+
def __init__(self,
in_channels,
out_channels,
@@ -58,19 +59,18 @@ def __init__(self,
self.is_tweaks_mode = is_tweaks_mode
#ResNet-D 1/2: adding a 2×2 average pooling layer with a stride of 2 before the convolution,
# whose stride is changed to 1, works well in practice.
- self._pool2d_avg = AvgPool2D(kernel_size=2,
- stride=2,
- padding=0,
- ceil_mode=True)
-
- self._conv = Conv2D(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=kernel_size,
- stride=stride,
- padding=(kernel_size - 1) // 2,
- groups=groups,
- weight_attr=ParamAttr(name=name + "_weights"),
- bias_attr=False)
+ self._pool2d_avg = AvgPool2D(
+ kernel_size=2, stride=2, padding=0, ceil_mode=True)
+
+ self._conv = Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(name=name + "_weights"),
+ bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
@@ -80,9 +80,10 @@ def __init__(self,
self._batch_norm = BatchNorm2D(
out_channels,
- weight_attr=ParamAttr(name=bn_name + "_scale",
- regularizer=L2Decay(0.0)),
- bias_attr=ParamAttr(bn_name + "_offset", regularizer=L2Decay(0.0)))
+ weight_attr=ParamAttr(
+ name=bn_name + "_scale", regularizer=L2Decay(0.0)),
+ bias_attr=ParamAttr(
+ bn_name + "_offset", regularizer=L2Decay(0.0)))
def forward(self, inputs):
if self.is_tweaks_mode:
@@ -104,31 +105,33 @@ def __init__(self,
num_seg=8,
name=None):
super(BottleneckBlock, self).__init__()
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=1,
- act="leaky_relu",
- name=name + "_branch2a")
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=3,
- stride=stride,
- act="leaky_relu",
- name=name + "_branch2b")
-
- self.conv2 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels * 4,
- kernel_size=1,
- act=None,
- name=name + "_branch2c")
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ act="leaky_relu",
+ name=name + "_branch2a")
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=stride,
+ act="leaky_relu",
+ name=name + "_branch2b")
+
+ self.conv2 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels * 4,
+ kernel_size=1,
+ act=None,
+ name=name + "_branch2c")
if not shortcut:
self.short = ConvBNLayer(
in_channels=in_channels,
out_channels=out_channels * 4,
kernel_size=1,
- stride=
- 1, #ResNet-D 2/2:add a 2×2 average pooling layer with a stride of 2 before the convolution,
+ stride=1, #ResNet-D 2/2:add a 2×2 average pooling layer with a stride of 2 before the convolution,
# whose stride is changed to 1, works well in practice.
is_tweaks_mode=False if if_first else True,
name=name + "_branch1")
@@ -159,24 +162,27 @@ def __init__(self,
name=None):
super(BasicBlock, self).__init__()
self.stride = stride
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- filter_size=3,
- stride=stride,
- act="leaky_relu",
- name=name + "_branch2a")
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- filter_size=3,
- act=None,
- name=name + "_branch2b")
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ filter_size=3,
+ stride=stride,
+ act="leaky_relu",
+ name=name + "_branch2a")
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ filter_size=3,
+ act=None,
+ name=name + "_branch2b")
if not shortcut:
- self.short = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- filter_size=1,
- stride=stride,
- name=name + "_branch1")
+ self.short = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ filter_size=1,
+ stride=stride,
+ name=name + "_branch1")
self.shortcut = shortcut
@@ -201,6 +207,7 @@ class ResNetTSM_MRI(nn.Layer):
depth (int): Depth of resnet model.
pretrained (str): pretrained model. Default: None.
"""
+
def __init__(self, depth, num_seg=8, pretrained=None, in_channels=1):
super(ResNetTSM_MRI, self).__init__()
self.pretrained = pretrained
@@ -226,24 +233,27 @@ def __init__(self, depth, num_seg=8, pretrained=None, in_channels=1):
out_channels = [64, 128, 256, 512]
#ResNet-C: use three 3x3 convs to replace one 7x7 conv
- self.conv1_1 = ConvBNLayer(in_channels=self.in_channels,
- out_channels=32,
- kernel_size=3,
- stride=2,
- act='leaky_relu',
- name="conv1_1")
- self.conv1_2 = ConvBNLayer(in_channels=32,
- out_channels=32,
- kernel_size=3,
- stride=1,
- act='leaky_relu',
- name="conv1_2")
- self.conv1_3 = ConvBNLayer(in_channels=32,
- out_channels=64,
- kernel_size=3,
- stride=1,
- act='leaky_relu',
- name="conv1_3")
+ self.conv1_1 = ConvBNLayer(
+ in_channels=self.in_channels,
+ out_channels=32,
+ kernel_size=3,
+ stride=2,
+ act='leaky_relu',
+ name="conv1_1")
+ self.conv1_2 = ConvBNLayer(
+ in_channels=32,
+ out_channels=32,
+ kernel_size=3,
+ stride=1,
+ act='leaky_relu',
+ name="conv1_2")
+ self.conv1_3 = ConvBNLayer(
+ in_channels=32,
+ out_channels=64,
+ kernel_size=3,
+ stride=1,
+ act='leaky_relu',
+ name="conv1_3")
self.pool2D_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
self.block_list = []
@@ -260,7 +270,8 @@ def __init__(self, depth, num_seg=8, pretrained=None, in_channels=1):
conv_name = "res" + str(block + 2) + chr(97 + i)
bottleneck_block = self.add_sublayer(
'bb_%d_%d' %
- (block, i), #same with PaddleClas, for loading pretrain
+ (block,
+ i), #same with PaddleClas, for loading pretrain
BottleneckBlock(
in_channels=in_channels
if i == 0 else out_channels[block] * 4,
@@ -280,12 +291,13 @@ def __init__(self, depth, num_seg=8, pretrained=None, in_channels=1):
conv_name = "res" + str(block + 2) + chr(97 + i)
basic_block = self.add_sublayer(
conv_name,
- BasicBlock(in_channels=in_channels[block]
- if i == 0 else out_channels[block],
- out_channels=out_channels[block],
- stride=2 if i == 0 and block != 0 else 1,
- shortcut=shortcut,
- name=conv_name))
+ BasicBlock(
+ in_channels=in_channels[block]
+ if i == 0 else out_channels[block],
+ out_channels=out_channels[block],
+ stride=2 if i == 0 and block != 0 else 1,
+ shortcut=shortcut,
+ name=conv_name))
self.block_list.append(basic_block)
shortcut = True
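# Hedged sketch of the ResNet-D shortcut tweak referenced by the
# is_tweaks_mode comments above: a 2x2 AvgPool with stride 2 performs the
# downsampling so the following 1x1 conv can keep stride 1 instead of
# discarding 3/4 of the activations. Shapes are illustrative assumptions.
import paddle
import paddle.nn as nn

x = paddle.randn([1, 64, 56, 56])

# plain ResNet shortcut: the strided 1x1 conv samples one pixel in four
plain = nn.Conv2D(64, 128, kernel_size=1, stride=2)

# ResNet-D shortcut: pool first, then 1x1 conv with stride 1
tweaked = nn.Sequential(
    nn.AvgPool2D(kernel_size=2, stride=2, padding=0, ceil_mode=True),
    nn.Conv2D(64, 128, kernel_size=1, stride=1))

print(plain(x).shape, tweaked(x).shape)            # both [1, 128, 28, 28]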
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsn_MRI.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsn_MRI.py
old mode 100644
new mode 100755
index 439a0eff8..de2a9cc68
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsn_MRI.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tsn_MRI.py
@@ -44,19 +44,18 @@ def __init__(self,
name=None):
super(ConvBNLayer, self).__init__()
self.is_tweaks_mode = is_tweaks_mode
- self._pool2d_avg = AvgPool2D(kernel_size=2,
- stride=2,
- padding=0,
- ceil_mode=True)
- self._conv = Conv2D(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=kernel_size,
- stride=stride,
- padding=(kernel_size - 1) // 2,
- groups=groups,
- weight_attr=ParamAttr(name=name + "_weights",
- learning_rate=lr_mult),
- bias_attr=False)
+ self._pool2d_avg = AvgPool2D(
+ kernel_size=2, stride=2, padding=0, ceil_mode=True)
+ self._conv = Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(
+ name=name + "_weights", learning_rate=lr_mult),
+ bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
@@ -64,12 +63,14 @@ def __init__(self,
self._batch_norm = BatchNorm(
out_channels,
act=act,
- param_attr=ParamAttr(name=bn_name + '_scale',
- learning_rate=lr_mult,
- regularizer=L2Decay(0.0)),
- bias_attr=ParamAttr(bn_name + '_offset',
- learning_rate=lr_mult,
- regularizer=L2Decay(0.0)),
+ param_attr=ParamAttr(
+ name=bn_name + '_scale',
+ learning_rate=lr_mult,
+ regularizer=L2Decay(0.0)),
+ bias_attr=ParamAttr(
+ bn_name + '_offset',
+ learning_rate=lr_mult,
+ regularizer=L2Decay(0.0)),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
@@ -92,34 +93,38 @@ def __init__(self,
name=None):
super(BottleneckBlock, self).__init__()
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=1,
- act='relu',
- lr_mult=lr_mult,
- name=name + "_branch2a")
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=3,
- stride=stride,
- act='relu',
- lr_mult=lr_mult,
- name=name + "_branch2b")
- self.conv2 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels * 4,
- kernel_size=1,
- act=None,
- lr_mult=lr_mult,
- name=name + "_branch2c")
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ act='relu',
+ lr_mult=lr_mult,
+ name=name + "_branch2a")
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=stride,
+ act='relu',
+ lr_mult=lr_mult,
+ name=name + "_branch2b")
+ self.conv2 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels * 4,
+ kernel_size=1,
+ act=None,
+ lr_mult=lr_mult,
+ name=name + "_branch2c")
if not shortcut:
- self.short = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels * 4,
- kernel_size=1,
- stride=1,
- is_tweaks_mode=False if if_first else True,
- lr_mult=lr_mult,
- name=name + "_branch1")
+ self.short = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels * 4,
+ kernel_size=1,
+ stride=1,
+ is_tweaks_mode=False if if_first else True,
+ lr_mult=lr_mult,
+ name=name + "_branch1")
self.shortcut = shortcut
@@ -148,28 +153,31 @@ def __init__(self,
name=None):
super(BasicBlock, self).__init__()
self.stride = stride
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=3,
- stride=stride,
- act='relu',
- lr_mult=lr_mult,
- name=name + "_branch2a")
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=3,
- act=None,
- lr_mult=lr_mult,
- name=name + "_branch2b")
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=stride,
+ act='relu',
+ lr_mult=lr_mult,
+ name=name + "_branch2a")
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ act=None,
+ lr_mult=lr_mult,
+ name=name + "_branch2b")
if not shortcut:
- self.short = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=1,
- stride=1,
- is_tweaks_mode=False if if_first else True,
- lr_mult=lr_mult,
- name=name + "_branch1")
+ self.short = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ stride=1,
+ is_tweaks_mode=False if if_first else True,
+ lr_mult=lr_mult,
+ name=name + "_branch1")
self.shortcut = shortcut
@@ -194,6 +202,7 @@ class ResNetTSN_MRI(nn.Layer):
depth (int): Depth of resnet model.
pretrained (str): pretrained model. Default: None.
"""
+
def __init__(self,
layers=50,
pretrained=None,
@@ -210,11 +219,10 @@ def __init__(self,
self.lr_mult_list = lr_mult_list
self.in_channels = in_channels
- assert isinstance(
- self.lr_mult_list,
- (list, tuple
- )), "lr_mult_list should be in (list, tuple) but got {}".format(
- type(self.lr_mult_list))
+ assert isinstance(self.lr_mult_list, (
+ list, tuple
+ )), "lr_mult_list should be in (list, tuple) but got {}".format(
+ type(self.lr_mult_list))
assert len(
self.lr_mult_list
) == 5, "lr_mult_list length should be 5 but got {}".format(
@@ -230,31 +238,34 @@ def __init__(self,
depth = [3, 8, 36, 3]
elif layers == 200:
depth = [3, 12, 48, 3]
- num_channels = [64, 256, 512, 1024
- ] if layers >= 50 else [64, 64, 128, 256]
+ num_channels = [64, 256, 512,
+ 1024] if layers >= 50 else [64, 64, 128, 256]
num_filters = [64, 128, 256, 512]
- self.conv1_1 = ConvBNLayer(in_channels=self.in_channels,
- out_channels=32,
- kernel_size=3,
- stride=2,
- act='relu',
- lr_mult=self.lr_mult_list[0],
- name="conv1_1")
- self.conv1_2 = ConvBNLayer(in_channels=32,
- out_channels=32,
- kernel_size=3,
- stride=1,
- act='relu',
- lr_mult=self.lr_mult_list[0],
- name="conv1_2")
- self.conv1_3 = ConvBNLayer(in_channels=32,
- out_channels=64,
- kernel_size=3,
- stride=1,
- act='relu',
- lr_mult=self.lr_mult_list[0],
- name="conv1_3")
+ self.conv1_1 = ConvBNLayer(
+ in_channels=self.in_channels,
+ out_channels=32,
+ kernel_size=3,
+ stride=2,
+ act='relu',
+ lr_mult=self.lr_mult_list[0],
+ name="conv1_1")
+ self.conv1_2 = ConvBNLayer(
+ in_channels=32,
+ out_channels=32,
+ kernel_size=3,
+ stride=1,
+ act='relu',
+ lr_mult=self.lr_mult_list[0],
+ name="conv1_2")
+ self.conv1_3 = ConvBNLayer(
+ in_channels=32,
+ out_channels=64,
+ kernel_size=3,
+ stride=1,
+ act='relu',
+ lr_mult=self.lr_mult_list[0],
+ name="conv1_3")
self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
self.block_list = []
@@ -289,14 +300,15 @@ def __init__(self,
conv_name = "res" + str(block + 2) + chr(97 + i)
basic_block = self.add_sublayer(
'bb_%d_%d' % (block, i),
- BasicBlock(in_channels=num_channels[block]
- if i == 0 else num_filters[block],
- out_channels=num_filters[block],
- stride=2 if i == 0 and block != 0 else 1,
- shortcut=shortcut,
- if_first=block == i == 0,
- name=conv_name,
- lr_mult=self.lr_mult_list[block + 1]))
+ BasicBlock(
+ in_channels=num_channels[block]
+ if i == 0 else num_filters[block],
+ out_channels=num_filters[block],
+ stride=2 if i == 0 and block != 0 else 1,
+ shortcut=shortcut,
+ if_first=block == i == 0,
+ name=conv_name,
+ lr_mult=self.lr_mult_list[block + 1]))
self.block_list.append(basic_block)
shortcut = True
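# Small sketch of what the lr_mult_list plumbing above achieves: parameters
# carry a per-stage ParamAttr learning-rate multiplier, so the optimizer's
# global LR is scaled stage by stage (a common backbone fine-tuning trick).
# The concrete multiplier values here are illustrative assumptions.
import paddle
import paddle.nn as nn
from paddle import ParamAttr

lr_mult_list = [0.1, 0.1, 0.2, 0.2, 0.3]           # 5 entries: stem + 4 stages
conv = nn.Conv2D(
    3, 32, kernel_size=3,
    weight_attr=ParamAttr(learning_rate=lr_mult_list[0]),
    bias_attr=False)

# With a global LR of 0.01, this layer effectively trains at 0.01 * 0.1.
opt = paddle.optimizer.Momentum(
    learning_rate=0.01, parameters=conv.parameters())
print(conv.weight.optimize_attr['learning_rate'])  # 0.1 on Paddle 2.x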
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tweaks_tsm.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tweaks_tsm.py
old mode 100644
new mode 100755
index f2ed947c5..ca4d44d08
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tweaks_tsm.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tweaks_tsm.py
@@ -45,6 +45,7 @@ class ConvBNLayer(nn.Layer):
Note: weight and bias initialization both set initial values and name the restored parameters; value initialization is explicitly declared in the ```init_weights``` method.
"""
+
def __init__(self,
in_channels,
out_channels,
@@ -58,19 +59,18 @@ def __init__(self,
self.is_tweaks_mode = is_tweaks_mode
#ResNet-D 1/2: adding a 2×2 average pooling layer with a stride of 2 before the convolution,
# whose stride is changed to 1, works well in practice.
- self._pool2d_avg = AvgPool2D(kernel_size=2,
- stride=2,
- padding=0,
- ceil_mode=True)
-
- self._conv = Conv2D(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=kernel_size,
- stride=stride,
- padding=(kernel_size - 1) // 2,
- groups=groups,
- weight_attr=ParamAttr(name=name + "_weights"),
- bias_attr=False)
+ self._pool2d_avg = AvgPool2D(
+ kernel_size=2, stride=2, padding=0, ceil_mode=True)
+
+ self._conv = Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(name=name + "_weights"),
+ bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
@@ -80,9 +80,10 @@ def __init__(self,
self._batch_norm = BatchNorm2D(
out_channels,
- weight_attr=ParamAttr(name=bn_name + "_scale",
- regularizer=L2Decay(0.0)),
- bias_attr=ParamAttr(bn_name + "_offset", regularizer=L2Decay(0.0)))
+ weight_attr=ParamAttr(
+ name=bn_name + "_scale", regularizer=L2Decay(0.0)),
+ bias_attr=ParamAttr(
+ bn_name + "_offset", regularizer=L2Decay(0.0)))
def forward(self, inputs):
if self.is_tweaks_mode:
@@ -104,31 +105,33 @@ def __init__(self,
num_seg=8,
name=None):
super(BottleneckBlock, self).__init__()
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=1,
- act="leaky_relu",
- name=name + "_branch2a")
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=3,
- stride=stride,
- act="leaky_relu",
- name=name + "_branch2b")
-
- self.conv2 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels * 4,
- kernel_size=1,
- act=None,
- name=name + "_branch2c")
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ act="leaky_relu",
+ name=name + "_branch2a")
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=stride,
+ act="leaky_relu",
+ name=name + "_branch2b")
+
+ self.conv2 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels * 4,
+ kernel_size=1,
+ act=None,
+ name=name + "_branch2c")
if not shortcut:
self.short = ConvBNLayer(
in_channels=in_channels,
out_channels=out_channels * 4,
kernel_size=1,
- stride=
- 1, #ResNet-D 2/2:add a 2×2 average pooling layer with a stride of 2 before the convolution,
+ stride=1, #ResNet-D 2/2:add a 2×2 average pooling layer with a stride of 2 before the convolution,
# whose stride is changed to 1, works well in practice.
is_tweaks_mode=False if if_first else True,
name=name + "_branch1")
@@ -142,20 +145,33 @@ def forward(self, inputs):
seg_num = self.num_seg
shift_ratio = 1.0 / self.num_seg
- shape = x.shape #[N*T, C, H, W]
- reshape_x = x.reshape((-1, seg_num, shape[1], shape[2], shape[3])) #[N, T, C, H, W]
- pad_x = paddle.fluid.layers.pad(reshape_x, [0,0,1,1,0,0,0,0,0,0,]) #[N, T+2, C, H, W]
+ shape = x.shape #[N*T, C, H, W]
+ reshape_x = x.reshape(
+ (-1, seg_num, shape[1], shape[2], shape[3])) #[N, T, C, H, W]
+ pad_x = paddle.fluid.layers.pad(reshape_x, [
+ 0,
+ 0,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ ]) #[N, T+2, C, H, W]
c1 = int(shape[1] * shift_ratio)
c2 = int(shape[1] * 2 * shift_ratio)
slice1 = pad_x[:, :seg_num, :c1, :, :]
- slice2 = pad_x[:, 2:seg_num+2, c1:c2, :, :]
- slice3 = pad_x[:, 1:seg_num+1, c2:, :, :]
- concat_x = paddle.concat([slice1, slice2, slice3], axis=2) #[N, T, C, H, W]
+ slice2 = pad_x[:, 2:seg_num + 2, c1:c2, :, :]
+ slice3 = pad_x[:, 1:seg_num + 1, c2:, :, :]
+ concat_x = paddle.concat(
+ [slice1, slice2, slice3], axis=2) #[N, T, C, H, W]
shifts = concat_x.reshape(shape)
else:
shifts = paddle.fluid.layers.temporal_shift(inputs, self.num_seg,
1.0 / self.num_seg)
-
+
y = self.conv0(shifts)
conv1 = self.conv1(y)
conv2 = self.conv2(conv1)
@@ -176,24 +192,27 @@ def __init__(self,
name=None):
super(BasicBlock, self).__init__()
self.stride = stride
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- filter_size=3,
- stride=stride,
- act="leaky_relu",
- name=name + "_branch2a")
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- filter_size=3,
- act=None,
- name=name + "_branch2b")
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ filter_size=3,
+ stride=stride,
+ act="leaky_relu",
+ name=name + "_branch2a")
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ filter_size=3,
+ act=None,
+ name=name + "_branch2b")
if not shortcut:
- self.short = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- filter_size=1,
- stride=stride,
- name=name + "_branch1")
+ self.short = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ filter_size=1,
+ stride=stride,
+ name=name + "_branch1")
self.shortcut = shortcut
@@ -218,6 +237,7 @@ class ResNetTweaksTSM(nn.Layer):
depth (int): Depth of resnet model.
pretrained (str): pretrained model. Default: None.
"""
+
def __init__(self, depth, num_seg=8, pretrained=None):
super(ResNetTweaksTSM, self).__init__()
self.pretrained = pretrained
@@ -242,24 +262,27 @@ def __init__(self, depth, num_seg=8, pretrained=None):
out_channels = [64, 128, 256, 512]
#ResNet-C: use three 3x3 convs to replace one 7x7 conv
- self.conv1_1 = ConvBNLayer(in_channels=3,
- out_channels=32,
- kernel_size=3,
- stride=2,
- act='leaky_relu',
- name="conv1_1")
- self.conv1_2 = ConvBNLayer(in_channels=32,
- out_channels=32,
- kernel_size=3,
- stride=1,
- act='leaky_relu',
- name="conv1_2")
- self.conv1_3 = ConvBNLayer(in_channels=32,
- out_channels=64,
- kernel_size=3,
- stride=1,
- act='leaky_relu',
- name="conv1_3")
+ self.conv1_1 = ConvBNLayer(
+ in_channels=3,
+ out_channels=32,
+ kernel_size=3,
+ stride=2,
+ act='leaky_relu',
+ name="conv1_1")
+ self.conv1_2 = ConvBNLayer(
+ in_channels=32,
+ out_channels=32,
+ kernel_size=3,
+ stride=1,
+ act='leaky_relu',
+ name="conv1_2")
+ self.conv1_3 = ConvBNLayer(
+ in_channels=32,
+ out_channels=64,
+ kernel_size=3,
+ stride=1,
+ act='leaky_relu',
+ name="conv1_3")
self.pool2D_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
self.block_list = []
@@ -276,7 +299,8 @@ def __init__(self, depth, num_seg=8, pretrained=None):
conv_name = "res" + str(block + 2) + chr(97 + i)
bottleneck_block = self.add_sublayer(
'bb_%d_%d' %
- (block, i), #same with PaddleClas, for loading pretrain
+ (block,
+ i), #same with PaddleClas, for loading pretrain
BottleneckBlock(
in_channels=in_channels
if i == 0 else out_channels[block] * 4,
@@ -296,12 +320,13 @@ def __init__(self, depth, num_seg=8, pretrained=None):
conv_name = "res" + str(block + 2) + chr(97 + i)
basic_block = self.add_sublayer(
conv_name,
- BasicBlock(in_channels=in_channels[block]
- if i == 0 else out_channels[block],
- out_channels=out_channels[block],
- stride=2 if i == 0 and block != 0 else 1,
- shortcut=shortcut,
- name=conv_name))
+ BasicBlock(
+ in_channels=in_channels[block]
+ if i == 0 else out_channels[block],
+ out_channels=out_channels[block],
+ stride=2 if i == 0 and block != 0 else 1,
+ shortcut=shortcut,
+ name=conv_name))
self.block_list.append(basic_block)
shortcut = True
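# A minimal sketch of the ResNet-C stem named in the comment above ("use
# three 3x3 convs to replace one 7x7 conv"): the stack keeps the same output
# resolution and receptive field as a stride-2 7x7 conv while adding depth
# and nonlinearity. Channel counts follow the code above; the input shape is
# an illustrative assumption.
import paddle
import paddle.nn as nn

x = paddle.randn([1, 3, 224, 224])

stem_7x7 = nn.Conv2D(3, 64, kernel_size=7, stride=2, padding=3)
stem_c = nn.Sequential(
    nn.Conv2D(3, 32, kernel_size=3, stride=2, padding=1), nn.ReLU(),
    nn.Conv2D(32, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(),
    nn.Conv2D(32, 64, kernel_size=3, stride=1, padding=1), nn.ReLU())

print(stem_7x7(x).shape, stem_c(x).shape)          # both [1, 64, 112, 112]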
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tweaks_tsn.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tweaks_tsn.py
old mode 100644
new mode 100755
index 36b33073f..a36a33bd7
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tweaks_tsn.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/resnet_tweaks_tsn.py
@@ -44,19 +44,18 @@ def __init__(self,
name=None):
super(ConvBNLayer, self).__init__()
self.is_tweaks_mode = is_tweaks_mode
- self._pool2d_avg = AvgPool2D(kernel_size=2,
- stride=2,
- padding=0,
- ceil_mode=True)
- self._conv = Conv2D(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=kernel_size,
- stride=stride,
- padding=(kernel_size - 1) // 2,
- groups=groups,
- weight_attr=ParamAttr(name=name + "_weights",
- learning_rate=lr_mult),
- bias_attr=False)
+ self._pool2d_avg = AvgPool2D(
+ kernel_size=2, stride=2, padding=0, ceil_mode=True)
+ self._conv = Conv2D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(
+ name=name + "_weights", learning_rate=lr_mult),
+ bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
@@ -64,12 +63,14 @@ def __init__(self,
self._batch_norm = BatchNorm(
out_channels,
act=act,
- param_attr=ParamAttr(name=bn_name + '_scale',
- learning_rate=lr_mult,
- regularizer=L2Decay(0.0)),
- bias_attr=ParamAttr(bn_name + '_offset',
- learning_rate=lr_mult,
- regularizer=L2Decay(0.0)),
+ param_attr=ParamAttr(
+ name=bn_name + '_scale',
+ learning_rate=lr_mult,
+ regularizer=L2Decay(0.0)),
+ bias_attr=ParamAttr(
+ bn_name + '_offset',
+ learning_rate=lr_mult,
+ regularizer=L2Decay(0.0)),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
@@ -92,34 +93,38 @@ def __init__(self,
name=None):
super(BottleneckBlock, self).__init__()
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=1,
- act='relu',
- lr_mult=lr_mult,
- name=name + "_branch2a")
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=3,
- stride=stride,
- act='relu',
- lr_mult=lr_mult,
- name=name + "_branch2b")
- self.conv2 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels * 4,
- kernel_size=1,
- act=None,
- lr_mult=lr_mult,
- name=name + "_branch2c")
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ act='relu',
+ lr_mult=lr_mult,
+ name=name + "_branch2a")
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=stride,
+ act='relu',
+ lr_mult=lr_mult,
+ name=name + "_branch2b")
+ self.conv2 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels * 4,
+ kernel_size=1,
+ act=None,
+ lr_mult=lr_mult,
+ name=name + "_branch2c")
if not shortcut:
- self.short = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels * 4,
- kernel_size=1,
- stride=1,
- is_tweaks_mode=False if if_first else True,
- lr_mult=lr_mult,
- name=name + "_branch1")
+ self.short = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels * 4,
+ kernel_size=1,
+ stride=1,
+ is_tweaks_mode=False if if_first else True,
+ lr_mult=lr_mult,
+ name=name + "_branch1")
self.shortcut = shortcut
@@ -148,28 +153,31 @@ def __init__(self,
name=None):
super(BasicBlock, self).__init__()
self.stride = stride
- self.conv0 = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=3,
- stride=stride,
- act='relu',
- lr_mult=lr_mult,
- name=name + "_branch2a")
- self.conv1 = ConvBNLayer(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=3,
- act=None,
- lr_mult=lr_mult,
- name=name + "_branch2b")
+ self.conv0 = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=stride,
+ act='relu',
+ lr_mult=lr_mult,
+ name=name + "_branch2a")
+ self.conv1 = ConvBNLayer(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ act=None,
+ lr_mult=lr_mult,
+ name=name + "_branch2b")
if not shortcut:
- self.short = ConvBNLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=1,
- stride=1,
- is_tweaks_mode=False if if_first else True,
- lr_mult=lr_mult,
- name=name + "_branch1")
+ self.short = ConvBNLayer(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ stride=1,
+ is_tweaks_mode=False if if_first else True,
+ lr_mult=lr_mult,
+ name=name + "_branch1")
self.shortcut = shortcut
@@ -194,6 +202,7 @@ class ResNetTweaksTSN(nn.Layer):
depth (int): Depth of resnet model.
pretrained (str): pretrained model. Default: None.
"""
+
def __init__(self,
layers=50,
pretrained=None,
@@ -208,11 +217,10 @@ def __init__(self,
supported_layers, layers)
self.lr_mult_list = lr_mult_list
- assert isinstance(
- self.lr_mult_list,
- (list, tuple
- )), "lr_mult_list should be in (list, tuple) but got {}".format(
- type(self.lr_mult_list))
+ assert isinstance(self.lr_mult_list, (
+ list, tuple
+ )), "lr_mult_list should be in (list, tuple) but got {}".format(
+ type(self.lr_mult_list))
assert len(
self.lr_mult_list
) == 5, "lr_mult_list length should be 5 but got {}".format(
@@ -228,31 +236,34 @@ def __init__(self,
depth = [3, 8, 36, 3]
elif layers == 200:
depth = [3, 12, 48, 3]
- num_channels = [64, 256, 512, 1024
- ] if layers >= 50 else [64, 64, 128, 256]
+ num_channels = [64, 256, 512,
+ 1024] if layers >= 50 else [64, 64, 128, 256]
num_filters = [64, 128, 256, 512]
- self.conv1_1 = ConvBNLayer(in_channels=3,
- out_channels=32,
- kernel_size=3,
- stride=2,
- act='relu',
- lr_mult=self.lr_mult_list[0],
- name="conv1_1")
- self.conv1_2 = ConvBNLayer(in_channels=32,
- out_channels=32,
- kernel_size=3,
- stride=1,
- act='relu',
- lr_mult=self.lr_mult_list[0],
- name="conv1_2")
- self.conv1_3 = ConvBNLayer(in_channels=32,
- out_channels=64,
- kernel_size=3,
- stride=1,
- act='relu',
- lr_mult=self.lr_mult_list[0],
- name="conv1_3")
+ self.conv1_1 = ConvBNLayer(
+ in_channels=3,
+ out_channels=32,
+ kernel_size=3,
+ stride=2,
+ act='relu',
+ lr_mult=self.lr_mult_list[0],
+ name="conv1_1")
+ self.conv1_2 = ConvBNLayer(
+ in_channels=32,
+ out_channels=32,
+ kernel_size=3,
+ stride=1,
+ act='relu',
+ lr_mult=self.lr_mult_list[0],
+ name="conv1_2")
+ self.conv1_3 = ConvBNLayer(
+ in_channels=32,
+ out_channels=64,
+ kernel_size=3,
+ stride=1,
+ act='relu',
+ lr_mult=self.lr_mult_list[0],
+ name="conv1_3")
self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
self.block_list = []
@@ -287,14 +298,15 @@ def __init__(self,
conv_name = "res" + str(block + 2) + chr(97 + i)
basic_block = self.add_sublayer(
'bb_%d_%d' % (block, i),
- BasicBlock(in_channels=num_channels[block]
- if i == 0 else num_filters[block],
- out_channels=num_filters[block],
- stride=2 if i == 0 and block != 0 else 1,
- shortcut=shortcut,
- if_first=block == i == 0,
- name=conv_name,
- lr_mult=self.lr_mult_list[block + 1]))
+ BasicBlock(
+ in_channels=num_channels[block]
+ if i == 0 else num_filters[block],
+ out_channels=num_filters[block],
+ stride=2 if i == 0 and block != 0 else 1,
+ shortcut=shortcut,
+ if_first=block == i == 0,
+ name=conv_name,
+ lr_mult=self.lr_mult_list[block + 1]))
self.block_list.append(basic_block)
shortcut = True
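# Sketch of the shortcut/projection rule used by BasicBlock and
# BottleneckBlock above: the skip path needs a 1x1 projection ("branch1")
# only when channels or stride change; later blocks in a stage set
# shortcut=True and add the input directly. Shapes are illustrative.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

x = paddle.randn([1, 64, 56, 56])
conv = nn.Conv2D(64, 128, kernel_size=3, stride=2, padding=1)
proj = nn.Conv2D(64, 128, kernel_size=1, stride=2)  # projection shortcut

y = F.relu(conv(x) + proj(x))                      # first block of a stage
print(y.shape)                                     # [1, 128, 28, 28]

conv2 = nn.Conv2D(128, 128, kernel_size=3, padding=1)
print(F.relu(conv2(y) + y).shape)                  # identity shortcut suffices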
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/stgcn.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/stgcn.py
old mode 100644
new mode 100755
index e756375f4..ae9ad7048
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/stgcn.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/stgcn.py
@@ -77,9 +77,8 @@ def __init__(self,
self.dilation = dilation
self.get_edge(layout)
- self.hop_dis = get_hop_distance(self.num_node,
- self.edge,
- max_hop=max_hop)
+ self.hop_dis = get_hop_distance(
+ self.num_node, self.edge, max_hop=max_hop)
self.get_adjacency(strategy)
def __str__(self):
@@ -101,11 +100,11 @@ def get_edge(self, layout):
elif layout == 'ntu-rgb+d':
self.num_node = 25
self_link = [(i, i) for i in range(self.num_node)]
- neighbor_1base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5),
- (7, 6), (8, 7), (9, 21), (10, 9), (11, 10),
- (12, 11), (13, 1), (14, 13), (15, 14), (16, 15),
- (17, 1), (18, 17), (19, 18), (20, 19), (22, 23),
- (23, 8), (24, 25), (25, 12)]
+ neighbor_1base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21),
+ (6, 5), (7, 6), (8, 7), (9, 21), (10, 9),
+ (11, 10), (12, 11), (13, 1), (14, 13), (15, 14),
+ (16, 15), (17, 1), (18, 17), (19, 18), (20, 19),
+ (22, 23), (23, 8), (24, 25), (25, 12)]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 21 - 1
@@ -159,12 +158,13 @@ def __init__(self,
super().__init__()
self.kernel_size = kernel_size
- self.conv = nn.Conv2D(in_channels,
- out_channels * kernel_size,
- kernel_size=(t_kernel_size, 1),
- padding=(t_padding, 0),
- stride=(t_stride, 1),
- dilation=(t_dilation, 1))
+ self.conv = nn.Conv2D(
+ in_channels,
+ out_channels * kernel_size,
+ kernel_size=(t_kernel_size, 1),
+ padding=(t_padding, 0),
+ stride=(t_stride, 1),
+ dilation=(t_dilation, 1))
def forward(self, x, A):
assert A.shape[0] == self.kernel_size
@@ -202,11 +202,9 @@ def __init__(self,
out_channels,
(kernel_size[0], 1),
(stride, 1),
- padding,
- ),
+ padding, ),
nn.BatchNorm2D(out_channels),
- nn.Dropout(dropout),
- )
+ nn.Dropout(dropout), )
if not residual:
self.residual = zero
@@ -216,12 +214,12 @@ def __init__(self,
else:
self.residual = nn.Sequential(
- nn.Conv2D(in_channels,
- out_channels,
- kernel_size=1,
- stride=(stride, 1)),
- nn.BatchNorm2D(out_channels),
- )
+ nn.Conv2D(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ stride=(stride, 1)),
+ nn.BatchNorm2D(out_channels), )
self.relu = nn.ReLU()
@@ -242,6 +240,7 @@ class STGCN(nn.Layer):
edge_importance_weighting: bool, whether to use edge attention. Default True.
data_bn: bool, whether to use data BatchNorm. Default True.
"""
+
def __init__(self,
in_channels=2,
edge_importance_weighting=True,
@@ -254,8 +253,7 @@ def __init__(self,
# load graph
self.graph = Graph(
layout=layout,
- strategy=strategy,
- )
+ strategy=strategy, )
A = paddle.to_tensor(self.graph.A, dtype='float32')
self.register_buffer('A', A)
@@ -267,12 +265,8 @@ def __init__(self,
A.shape[1]) if self.data_bn else iden
kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'}
self.st_gcn_networks = nn.LayerList((
- st_gcn_block(in_channels,
- 64,
- kernel_size,
- 1,
- residual=False,
- **kwargs0),
+ st_gcn_block(
+ in_channels, 64, kernel_size, 1, residual=False, **kwargs0),
st_gcn_block(64, 64, kernel_size, 1, **kwargs),
st_gcn_block(64, 64, kernel_size, 1, **kwargs),
st_gcn_block(64, 64, kernel_size, 1, **kwargs),
@@ -281,8 +275,7 @@ def __init__(self,
st_gcn_block(128, 128, kernel_size, 1, **kwargs),
st_gcn_block(128, 256, kernel_size, 2, **kwargs),
st_gcn_block(256, 256, kernel_size, 1, **kwargs),
- st_gcn_block(256, 256, kernel_size, 1, **kwargs),
- ))
+ st_gcn_block(256, 256, kernel_size, 1, **kwargs), ))
# initialize parameters for edge importance weighting
if edge_importance_weighting:
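# Hedged sketch of the edge-importance weighting set up right above: each
# st-gcn block gets a learnable mask with the same shape as the adjacency
# tensor A, and the graph convolution consumes A * mask so edges can be
# re-weighted during training. K is the number of adjacency subsets from the
# partition strategy; 25 joints matches the 'ntu-rgb+d' layout.
import paddle

K, V = 3, 25
A = paddle.rand([K, V, V])                         # fixed graph adjacency

importance = paddle.create_parameter(
    shape=A.shape, dtype='float32',
    default_initializer=paddle.nn.initializer.Constant(1.0))

weighted_A = A * importance                        # what each block consumes
print(weighted_A.shape)                            # [3, 25, 25]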
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/swin_transformer.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/swin_transformer.py
old mode 100644
new mode 100755
index aaed21790..042a75d69
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/swin_transformer.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/swin_transformer.py
@@ -49,6 +49,7 @@ def drop_path(x, drop_prob=0., training=False):
class DropPath(nn.Layer):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
+
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
@@ -59,6 +60,7 @@ def forward(self, x):
class Mlp(nn.Layer):
""" Multilayer perceptron."""
+
def __init__(self,
in_features,
hidden_features=None,
@@ -156,6 +158,7 @@ class WindowAttention3D(nn.Layer):
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
+
def __init__(self,
dim,
window_size,
@@ -176,8 +179,7 @@ def __init__(self,
self.relative_position_bias_table = self.create_parameter(
shape=((2 * window_size[0] - 1) * (2 * window_size[1] - 1) *
(2 * window_size[2] - 1), num_heads),
- default_initializer=zeros_,
- ) # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH
+ default_initializer=zeros_, ) # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH
self.add_parameter("relative_position_bias_table",
self.relative_position_bias_table)
# get pair-wise relative position index for each token inside the window
@@ -192,15 +194,15 @@ def __init__(self,
axis=2) - coords_flatten.unsqueeze(axis=1) # 3, Wd*Wh*Ww, Wd*Wh*Ww
# relative_coords = coords_flatten.unsqueeze(2) - coords_flatten.unsqueeze(1) # 3, Wd*Wh*Ww, Wd*Wh*Ww
- relative_coords = relative_coords.transpose([1, 2, 0
- ]) # Wd*Wh*Ww, Wd*Wh*Ww, 3
- relative_coords[:, :,
- 0] += self.window_size[0] - 1 # shift to start from 0
+ relative_coords = relative_coords.transpose(
+ [1, 2, 0]) # Wd*Wh*Ww, Wd*Wh*Ww, 3
+ relative_coords[:, :, 0] += self.window_size[
+ 0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 2] += self.window_size[2] - 1
- relative_coords[:, :, 0] *= (2 * self.window_size[1] -
- 1) * (2 * self.window_size[2] - 1)
+ relative_coords[:, :, 0] *= (
+ 2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
relative_coords[:, :, 1] *= (2 * self.window_size[2] - 1)
relative_position_index = relative_coords.sum(
axis=-1) # Wd*Wh*Ww, Wd*Wh*Ww
@@ -227,7 +229,7 @@ def forward(self, x, mask=None):
q, k, v = qkv[0], qkv[1], qkv[2] # B_, nH, N, C
q = q * self.scale
- attn = q @ k.transpose([0, 1, 3, 2])
+ attn = q @ k.transpose([0, 1, 3, 2])
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index[:N, :N].reshape([-1])].reshape(
@@ -247,7 +249,7 @@ def forward(self, x, mask=None):
attn = self.attn_drop(attn)
- x = (attn @ v).transpose([0, 2, 1, 3]).reshape([B_, N, C])
+ x = (attn @ v).transpose([0, 2, 1, 3]).reshape([B_, N, C])
x = self.proj(x)
x = self.proj_drop(x)
return x
@@ -270,6 +272,7 @@ class SwinTransformerBlock3D(nn.Layer):
act_layer (nn.Layer, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm
"""
+
def __init__(self,
dim,
num_heads,
@@ -300,13 +303,14 @@ def __init__(self,
2], "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
- self.attn = WindowAttention3D(dim,
- window_size=self.window_size,
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- attn_drop=attn_drop,
- proj_drop=drop)
+ self.attn = WindowAttention3D(
+ dim,
+ window_size=self.window_size,
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ attn_drop=attn_drop,
+ proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity()
self.norm2 = norm_layer(dim)
@@ -333,10 +337,10 @@ def forward_part1(self, x, mask_matrix):
_, Dp, Hp, Wp, _ = x.shape
# cyclic shift
if any(i > 0 for i in shift_size):
- shifted_x = paddle.roll(x,
- shifts=(-shift_size[0], -shift_size[1],
- -shift_size[2]),
- axis=(1, 2, 3))
+ shifted_x = paddle.roll(
+ x,
+ shifts=(-shift_size[0], -shift_size[1], -shift_size[2]),
+ axis=(1, 2, 3))
attn_mask = mask_matrix
else:
shifted_x = x
@@ -352,10 +356,10 @@ def forward_part1(self, x, mask_matrix):
Wp) # B D' H' W' C
# reverse cyclic shift
if any(i > 0 for i in shift_size):
- x = paddle.roll(shifted_x,
- shifts=(shift_size[0], shift_size[1],
- shift_size[2]),
- axis=(1, 2, 3))
+ x = paddle.roll(
+ shifted_x,
+ shifts=(shift_size[0], shift_size[1], shift_size[2]),
+ axis=(1, 2, 3))
else:
x = shifted_x
@@ -389,6 +393,7 @@ class PatchMerging(nn.Layer):
dim (int): Number of input channels.
norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm
"""
+
def __init__(self, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
@@ -425,15 +430,13 @@ def forward(self, x):
def compute_mask(D, H, W, window_size, shift_size):
img_mask = paddle.zeros((1, D, H, W, 1)) # 1 Dp Hp Wp 1
cnt = 0
- for d in slice(-window_size[0]), slice(-window_size[0],
- -shift_size[0]), slice(
- -shift_size[0], None):
- for h in slice(-window_size[1]), slice(-window_size[1],
- -shift_size[1]), slice(
- -shift_size[1], None):
- for w in slice(-window_size[2]), slice(-window_size[2],
- -shift_size[2]), slice(
- -shift_size[2], None):
+ for d in slice(-window_size[0]), slice(
+ -window_size[0], -shift_size[0]), slice(-shift_size[0], None):
+ for h in slice(-window_size[1]), slice(
+ -window_size[1], -shift_size[1]), slice(-shift_size[1], None):
+ for w in slice(-window_size[2]), slice(
+ -window_size[2], -shift_size[2]), slice(-shift_size[2],
+ None):
img_mask[:, d, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask,
@@ -463,6 +466,7 @@ class BasicLayer(nn.Layer):
norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Layer | None, optional): Downsample layer at the end of the layer. Default: None
"""
+
def __init__(self,
dim,
depth,
@@ -498,8 +502,7 @@ def __init__(self,
drop_path=drop_path[i]
if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
- use_checkpoint=use_checkpoint,
- ) for i in range(depth)
+ use_checkpoint=use_checkpoint, ) for i in range(depth)
])
self.downsample = downsample
@@ -542,6 +545,7 @@ class PatchEmbed3D(nn.Layer):
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Layer, optional): Normalization layer. Default: None
"""
+
def __init__(self,
patch_size=(2, 4, 4),
in_chans=3,
@@ -553,10 +557,8 @@ def __init__(self,
self.in_chans = in_chans
self.embed_dim = embed_dim
- self.proj = nn.Conv3D(in_chans,
- embed_dim,
- kernel_size=patch_size,
- stride=patch_size)
+ self.proj = nn.Conv3D(
+ in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
@@ -611,6 +613,7 @@ class SwinTransformer3D(nn.Layer):
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
"""
+
def __init__(self,
pretrained=None,
patch_size=(4, 4, 4),
@@ -714,9 +717,8 @@ def init_weights(self):
self.apply(self._init_fn)
"""Second, if provide pretrained ckpt, load it"""
- if isinstance(
- self.pretrained, str
- ) and self.pretrained.strip() != "": # load pretrained weights
+ if isinstance(self.pretrained, str) and self.pretrained.strip(
+ ) != "": # load pretrained weights
load_ckpt(self, self.pretrained)
elif self.pretrained is None or self.pretrained.strip() == "":
pass
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/transnetv2.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/transnetv2.py
old mode 100644
new mode 100755
index 60603e2c9..1de0f0ce9
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/transnetv2.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/transnetv2.py
@@ -23,33 +23,56 @@
class OctConv3D(nn.Layer):
- def __init__(self, in_filters, filters, kernel_size=3, dilation_rate=(1, 1, 1), alpha=0.25,
- use_bias=True, kernel_initializer=nn.initializer.KaimingNormal()):
+ def __init__(self,
+ in_filters,
+ filters,
+ kernel_size=3,
+ dilation_rate=(1, 1, 1),
+ alpha=0.25,
+ use_bias=True,
+ kernel_initializer=nn.initializer.KaimingNormal()):
super(OctConv3D, self).__init__()
self.low_channels = int(filters * alpha)
self.high_channels = filters - self.low_channels
- self.high_to_high = nn.Conv3D(in_filters, self.high_channels, kernel_size=kernel_size,
- dilation=dilation_rate, padding=(dilation_rate[0], 1, 1),
- weight_attr=ParamAttr(initializer=kernel_initializer),
- bias_attr=ParamAttr(
- initializer=nn.initializer.Constant(value=0.)) if use_bias else use_bias)
- self.high_to_low = nn.Conv3D(self.high_channels, self.low_channels, kernel_size=kernel_size,
- dilation=dilation_rate, padding=(dilation_rate[0], 1, 1),
- weight_attr=ParamAttr(initializer=kernel_initializer),
- bias_attr=False)
- self.low_to_high = nn.Conv3D(in_filters, self.high_channels, kernel_size=kernel_size,
- dilation=dilation_rate, padding=(dilation_rate[0], 1, 1),
- weight_attr=ParamAttr(initializer=kernel_initializer),
- bias_attr=False)
- self.low_to_low = nn.Conv3D(self.high_channels, self.low_channels, kernel_size=kernel_size,
- dilation=dilation_rate, padding=(dilation_rate[0], 1, 1),
- weight_attr=ParamAttr(initializer=kernel_initializer),
- bias_attr=ParamAttr(
- initializer=nn.initializer.Constant(value=0.)) if use_bias else use_bias)
+ self.high_to_high = nn.Conv3D(
+ in_filters,
+ self.high_channels,
+ kernel_size=kernel_size,
+ dilation=dilation_rate,
+ padding=(dilation_rate[0], 1, 1),
+ weight_attr=ParamAttr(initializer=kernel_initializer),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.))
+ if use_bias else use_bias)
+ self.high_to_low = nn.Conv3D(
+ self.high_channels,
+ self.low_channels,
+ kernel_size=kernel_size,
+ dilation=dilation_rate,
+ padding=(dilation_rate[0], 1, 1),
+ weight_attr=ParamAttr(initializer=kernel_initializer),
+ bias_attr=False)
+ self.low_to_high = nn.Conv3D(
+ in_filters,
+ self.high_channels,
+ kernel_size=kernel_size,
+ dilation=dilation_rate,
+ padding=(dilation_rate[0], 1, 1),
+ weight_attr=ParamAttr(initializer=kernel_initializer),
+ bias_attr=False)
+ self.low_to_low = nn.Conv3D(
+ self.high_channels,
+ self.low_channels,
+ kernel_size=kernel_size,
+ dilation=dilation_rate,
+ padding=(dilation_rate[0], 1, 1),
+ weight_attr=ParamAttr(initializer=kernel_initializer),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.))
+ if use_bias else use_bias)
self.upsampler = nn.Upsample(size=(1, 2, 2), data_format='NCDHW')
- self.downsampler = nn.AvgPool3D(kernel_size=(1, 2, 2), stride=(1, 2, 2), padding=(0, 1, 1))
+ self.downsampler = nn.AvgPool3D(
+ kernel_size=(1, 2, 2), stride=(1, 2, 2), padding=(0, 1, 1))
@staticmethod
def pad_to(tensor, target_shape):
@@ -70,8 +93,10 @@ def forward(self, inputs):
low_to_high = self.upsampler(self.low_to_high(low_inputs))
low_to_low = self.low_to_low(low_inputs)
- high_output = high_to_high[:, :, :, :low_to_high.shape[3], :low_to_high.shape[4]] + low_to_high
- low_output = low_to_low + high_to_low[:, :, :, :low_to_low.shape[3], :low_to_low.shape[4]]
+ high_output = high_to_high[:, :, :, :low_to_high.shape[3], :
+ low_to_high.shape[4]] + low_to_high
+ low_output = low_to_low + high_to_low[:, :, :, :low_to_low.shape[3], :
+ low_to_low.shape[4]]
return low_output, high_output
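For context on OctConv3D: it keeps a full-resolution high-frequency path and a spatially halved low-frequency path, and each layer the two paths exchange information through upsampling and average pooling. A hedged toy sketch of just the exchange (the real module also runs the four Conv3D branches constructed above):

```python
import paddle
import paddle.nn as nn

# High path at full H/W, low path at half H/W; they trade information via
# nearest upsampling and average pooling (toy tensors, convs omitted).
B, C, T, H, W = 1, 8, 4, 16, 16
high = paddle.randn([B, C, T, H, W])
low = paddle.randn([B, C, T, H // 2, W // 2])
up = nn.Upsample(scale_factor=(1, 2, 2), data_format='NCDHW')
down = nn.AvgPool3D(kernel_size=(1, 2, 2), stride=(1, 2, 2))
high_out = high + up(low)    # low-to-high contribution is upsampled
low_out = low + down(high)   # high-to-low contribution is downsampled
print(high_out.shape, low_out.shape)
```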
@@ -88,27 +113,48 @@ def __init__(self,
assert not (separable and octave)
if separable:
- conv1 = nn.Conv3D(in_filters, 2 * filters, kernel_size=(1, 3, 3),
- dilation=(1, 1, 1), padding=(0, 1, 1),
- weight_attr=ParamAttr(initializer=nn.initializer.KaimingNormal()),
- bias_attr=False)
- conv2 = nn.Conv3D(2 * filters, filters, kernel_size=(3, 1, 1),
- dilation=(dilation_rate, 1, 1), padding=(dilation_rate, 0, 0),
- weight_attr=ParamAttr(initializer=nn.initializer.KaimingNormal()),
- bias_attr=ParamAttr(
- initializer=nn.initializer.Constant(value=0.)) if use_bias else use_bias)
+ conv1 = nn.Conv3D(
+ in_filters,
+ 2 * filters,
+ kernel_size=(1, 3, 3),
+ dilation=(1, 1, 1),
+ padding=(0, 1, 1),
+ weight_attr=ParamAttr(
+ initializer=nn.initializer.KaimingNormal()),
+ bias_attr=False)
+ conv2 = nn.Conv3D(
+ 2 * filters,
+ filters,
+ kernel_size=(3, 1, 1),
+ dilation=(dilation_rate, 1, 1),
+ padding=(dilation_rate, 0, 0),
+ weight_attr=ParamAttr(
+ initializer=nn.initializer.KaimingNormal()),
+ bias_attr=ParamAttr(
+ initializer=nn.initializer.Constant(value=0.))
+ if use_bias else use_bias)
self.layers = nn.LayerList([conv1, conv2])
elif octave:
- conv = OctConv3D(in_filters, filters, kernel_size=3, dilation_rate=(dilation_rate, 1, 1),
- use_bias=use_bias,
- kernel_initializer=nn.initializer.KaimingNormal())
+ conv = OctConv3D(
+ in_filters,
+ filters,
+ kernel_size=3,
+ dilation_rate=(dilation_rate, 1, 1),
+ use_bias=use_bias,
+ kernel_initializer=nn.initializer.KaimingNormal())
self.layers = [conv]
else:
- conv = nn.Conv3D(in_filters, filters, kernel_size=3,
- dilation=(dilation_rate, 1, 1), padding=(dilation_rate, 1, 1),
- weight_attr=ParamAttr(initializer=nn.initializer.KaimingNormal()),
- bias_attr=ParamAttr(
- initializer=nn.initializer.Constant(value=0.)) if use_bias else use_bias)
+ conv = nn.Conv3D(
+ in_filters,
+ filters,
+ kernel_size=3,
+ dilation=(dilation_rate, 1, 1),
+ padding=(dilation_rate, 1, 1),
+ weight_attr=ParamAttr(
+ initializer=nn.initializer.KaimingNormal()),
+ bias_attr=ParamAttr(
+ initializer=nn.initializer.Constant(value=0.))
+ if use_bias else use_bias)
self.layers = nn.LayerList([conv])
def forward(self, inputs):
@@ -128,16 +174,24 @@ def __init__(self,
super(DilatedDCNNV2, self).__init__()
assert not (octave_conv and batch_norm)
- self.Conv3D_1 = Conv3DConfigurable(in_filters, filters, 1, use_bias=not batch_norm, octave=octave_conv)
- self.Conv3D_2 = Conv3DConfigurable(in_filters, filters, 2, use_bias=not batch_norm, octave=octave_conv)
- self.Conv3D_4 = Conv3DConfigurable(in_filters, filters, 4, use_bias=not batch_norm, octave=octave_conv)
- self.Conv3D_8 = Conv3DConfigurable(in_filters, filters, 8, use_bias=not batch_norm, octave=octave_conv)
+ self.Conv3D_1 = Conv3DConfigurable(
+ in_filters, filters, 1, use_bias=not batch_norm, octave=octave_conv)
+ self.Conv3D_2 = Conv3DConfigurable(
+ in_filters, filters, 2, use_bias=not batch_norm, octave=octave_conv)
+ self.Conv3D_4 = Conv3DConfigurable(
+ in_filters, filters, 4, use_bias=not batch_norm, octave=octave_conv)
+ self.Conv3D_8 = Conv3DConfigurable(
+ in_filters, filters, 8, use_bias=not batch_norm, octave=octave_conv)
self.octave = octave_conv
- self.bn = nn.BatchNorm3D(filters * 4, momentum=0.99, epsilon=1e-03,
- weight_attr=ParamAttr(initializer=nn.initializer.Constant(value=1.)),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.))
- ) if batch_norm else None
+ self.bn = nn.BatchNorm3D(
+ filters * 4,
+ momentum=0.99,
+ epsilon=1e-03,
+ weight_attr=ParamAttr(initializer=nn.initializer.Constant(
+ value=1.)),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(
+ value=0.))) if batch_norm else None
self.activation = activation
def forward(self, inputs):
@@ -148,8 +202,12 @@ def forward(self, inputs):
# shape of convi[j]/convi is [B, 3, T, H, W], concat in channel dimension
if self.octave:
- x = [paddle.concat([conv1[0], conv2[0], conv3[0], conv4[0]], axis=1),
- paddle.concat([conv1[1], conv2[1], conv3[1], conv4[1]], axis=1)]
+ x = [
+ paddle.concat(
+ [conv1[0], conv2[0], conv3[0], conv4[0]], axis=1),
+ paddle.concat(
+ [conv1[1], conv2[1], conv3[1], conv4[1]], axis=1)
+ ]
else:
x = paddle.concat([conv1, conv2, conv3, conv4], axis=1)
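DilatedDCNNV2 runs four parallel branches with temporal dilation rates 1, 2, 4 and 8 and concatenates them along channels, so a single block sees several temporal receptive fields at once. A minimal sketch of that concat (toy shapes, plain Conv3D standing in for Conv3DConfigurable):

```python
import paddle
import paddle.nn as nn

# Four temporal dilation rates in parallel; padding (d, 1, 1) with a 3x3x3
# kernel keeps T, H, W unchanged, so the outputs concat cleanly on channels.
x = paddle.randn([2, 3, 16, 27, 48])  # [B, C, T, H, W]
branches = [
    nn.Conv3D(3, 4, kernel_size=3, dilation=(d, 1, 1), padding=(d, 1, 1))
    for d in (1, 2, 4, 8)
]
y = paddle.concat([b(x) for b in branches], axis=1)
print(y.shape)  # [2, 16, 16, 27, 48]: 4 branches x 4 filters = 16 channels
```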
@@ -176,14 +234,22 @@ def __init__(self,
super(StackedDDCNNV2, self).__init__()
assert pool_type == "max" or pool_type == "avg"
if use_octave_conv and pool_type == "max":
- print("WARN: Octave convolution was designed with average pooling, not max pooling.")
+ print(
+ "WARN: Octave convolution was designed with average pooling, not max pooling."
+ )
self.shortcut = shortcut
self.DDCNN = nn.LayerList([
- DilatedDCNNV2(in_filters if i == 1 else filters * 4, filters, octave_conv=use_octave_conv,
- activation=functional.relu if i != n_blocks else None) for i in range(1, n_blocks + 1)
+ DilatedDCNNV2(
+ in_filters if i == 1 else filters * 4,
+ filters,
+ octave_conv=use_octave_conv,
+ activation=functional.relu if i != n_blocks else None)
+ for i in range(1, n_blocks + 1)
])
- self.pool = nn.MaxPool3D(kernel_size=(1, 2, 2)) if pool_type == "max" else nn.AvgPool3D(kernel_size=(1, 2, 2))
+ self.pool = nn.MaxPool3D(kernel_size=(
+ 1, 2, 2)) if pool_type == "max" else nn.AvgPool3D(kernel_size=(1, 2,
+ 2))
self.octave = use_octave_conv
self.stochastic_depth_drop_prob = stochastic_depth_drop_prob
@@ -224,19 +290,32 @@ class ResNetBlock(nn.Layer):
def __init__(self, in_filters, filters, strides=(1, 1)):
super(ResNetBlock, self).__init__()
- self.conv1 = nn.Conv2D(in_filters, filters, kernel_size=(3, 3), stride=strides, padding=(1, 1),
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=False)
- self.bn1 = nn.BatchNorm2D(filters,
- weight_attr=ParamAttr(initializer=nn.initializer.Constant(value=1.)),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
-
- self.conv2 = nn.Conv2D(filters, filters, kernel_size=(3, 3), padding=(1, 1),
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=False)
- self.bn2 = nn.BatchNorm2D(filters,
- weight_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
+ self.conv1 = nn.Conv2D(
+ in_filters,
+ filters,
+ kernel_size=(3, 3),
+ stride=strides,
+ padding=(1, 1),
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=False)
+ self.bn1 = nn.BatchNorm2D(
+ filters,
+ weight_attr=ParamAttr(initializer=nn.initializer.Constant(
+ value=1.)),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
+
+ self.conv2 = nn.Conv2D(
+ filters,
+ filters,
+ kernel_size=(3, 3),
+ padding=(1, 1),
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=False)
+ self.bn2 = nn.BatchNorm2D(
+ filters,
+ weight_attr=ParamAttr(initializer=nn.initializer.Constant(
+ value=0.)),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
def forward(self, inputs):
x = self.conv1(inputs)
@@ -253,19 +332,28 @@ def forward(self, inputs):
class ResNetFeatures(nn.Layer):
- def __init__(self, in_filters=3,
+ def __init__(self,
+ in_filters=3,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]):
super(ResNetFeatures, self).__init__()
- self.conv1 = nn.Conv2D(in_channels=in_filters, out_channels=64, kernel_size=(7, 7),
- stride=(2, 2), padding=(3, 3),
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=False)
- self.bn1 = nn.BatchNorm2D(num_features=64, momentum=0.99, epsilon=1e-03,
- weight_attr=ParamAttr(initializer=nn.initializer.Constant(value=1.)),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.))
- )
- self.max_pool = nn.MaxPool2D(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
+ self.conv1 = nn.Conv2D(
+ in_channels=in_filters,
+ out_channels=64,
+ kernel_size=(7, 7),
+ stride=(2, 2),
+ padding=(3, 3),
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=False)
+ self.bn1 = nn.BatchNorm2D(
+ num_features=64,
+ momentum=0.99,
+ epsilon=1e-03,
+ weight_attr=ParamAttr(initializer=nn.initializer.Constant(
+ value=1.)),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
+ self.max_pool = nn.MaxPool2D(
+ kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
self.layer2a = ResNetBlock(64, 64)
self.layer2b = ResNetBlock(64, 64)
@@ -275,7 +363,8 @@ def __init__(self, in_filters=3,
def forward(self, inputs):
shape = inputs.shape
- x = paddle.reshape(inputs, [shape[0] * shape[2], shape[1], shape[3], shape[4]])
+ x = paddle.reshape(inputs,
+ [shape[0] * shape[2], shape[1], shape[3], shape[4]])
x = (x - self.mean) / self.std
x = self.conv1(x)
@@ -286,7 +375,8 @@ def forward(self, inputs):
x = self.layer2b(x)
new_shape = x.shape
- x = paddle.reshape(x, [shape[0], new_shape[1], shape[2], new_shape[2], new_shape[3]])
+ x = paddle.reshape(
+ x, [shape[0], new_shape[1], shape[2], new_shape[2], new_shape[3]])
return x
@@ -299,12 +389,16 @@ def __init__(self,
stop_gradient=False,
use_bias=False):
super(FrameSimilarity, self).__init__()
- self.projection = nn.Linear(in_filters, similarity_dim,
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=use_bias)
- self.fc = nn.Linear(lookup_window, output_dim,
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
+ self.projection = nn.Linear(
+ in_filters,
+ similarity_dim,
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=use_bias)
+ self.fc = nn.Linear(
+ lookup_window,
+ output_dim,
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
self.lookup_window = lookup_window
self.stop_gradient = stop_gradient
@@ -319,37 +413,64 @@ def forward(self, inputs):
x = self.projection(x)
x = functional.normalize(x, p=2, axis=2)
- batch_size = paddle.slice(x.shape, starts=[0], ends=[1], axes=[0]) if x.shape[0] == -1 else x.shape[0]
+ batch_size = paddle.slice(
+ x.shape, starts=[0], ends=[1],
+ axes=[0]) if x.shape[0] == -1 else x.shape[0]
time_window = x.shape[1]
- similarities = paddle.bmm(x, x.transpose([0, 2, 1])) # [batch_size, time_window, time_window]
+ similarities = paddle.bmm(x, x.transpose(
+ [0, 2, 1])) # [batch_size, time_window, time_window]
- similarities_padded = functional.pad(similarities,
- [(self.lookup_window - 1) // 2, (self.lookup_window - 1) // 2],
- data_format='NCL')
+ similarities_padded = functional.pad(
+ similarities,
+ [(self.lookup_window - 1) // 2, (self.lookup_window - 1) // 2],
+ data_format='NCL')
batch_indices = paddle.arange(0, batch_size).reshape([batch_size, 1, 1])
- batch_indices = paddle.tile(batch_indices, [1, time_window, self.lookup_window])
- time_indices = paddle.arange(0, time_window).reshape([1, time_window, 1])
- time_indices = paddle.tile(time_indices, [batch_size, 1, self.lookup_window])
- lookup_indices = paddle.arange(0, self.lookup_window).reshape([1, 1, self.lookup_window])
- lookup_indices = paddle.tile(lookup_indices, [batch_size, time_window, 1]) + time_indices
- indices = paddle.stack([batch_indices, time_indices, lookup_indices], -1)
+ batch_indices = paddle.tile(batch_indices,
+ [1, time_window, self.lookup_window])
+ time_indices = paddle.arange(0, time_window).reshape(
+ [1, time_window, 1])
+ time_indices = paddle.tile(time_indices,
+ [batch_size, 1, self.lookup_window])
+ lookup_indices = paddle.arange(0, self.lookup_window).reshape(
+ [1, 1, self.lookup_window])
+ lookup_indices = paddle.tile(
+ lookup_indices, [batch_size, time_window, 1]) + time_indices
+ indices = paddle.stack([batch_indices, time_indices, lookup_indices],
+ -1)
similarities = paddle.gather_nd(similarities_padded, indices)
return functional.relu(self.fc(similarities))
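The gather above implements a banded similarity lookup: cosine similarities between all frame pairs are computed with bmm, padded, and for each frame only a lookup_window-wide band around it is kept. A small sketch of the same banding using a loop instead of gather_nd (toy sizes):

```python
import paddle
import paddle.nn.functional as F

# Cosine similarities [B, T, T], padded so that row t of the result holds
# frame t's similarity to its (window-1)//2 neighbours on each side.
B, T, D, window = 2, 10, 16, 5
x = F.normalize(paddle.randn([B, T, D]), p=2, axis=2)
sim = paddle.bmm(x, x.transpose([0, 2, 1]))               # [B, T, T]
sim = F.pad(sim, [(window - 1) // 2, (window - 1) // 2], data_format='NCL')
windows = paddle.stack(
    [sim[:, t, t:t + window] for t in range(T)], axis=1)  # [B, T, window]
print(windows.shape)
```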
class ConvexCombinationRegularization(nn.Layer):
- def __init__(self, in_filters, filters=32, delta_scale=10., loss_weight=0.01):
+ def __init__(self,
+ in_filters,
+ filters=32,
+ delta_scale=10.,
+ loss_weight=0.01):
super(ConvexCombinationRegularization, self).__init__()
- self.projection = nn.Conv3D(in_filters, filters, kernel_size=1, dilation=1, padding=(0, 0, 0),
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
- self.features = nn.Conv3D((filters * 3), filters * 2,
- kernel_size=(3, 3, 3), dilation=1, padding=(1, 1, 1),
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
- self.dense = nn.Linear(64, 1, weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()), bias_attr=True)
+ self.projection = nn.Conv3D(
+ in_filters,
+ filters,
+ kernel_size=1,
+ dilation=1,
+ padding=(0, 0, 0),
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
+ self.features = nn.Conv3D(
+ (filters * 3),
+ filters * 2,
+ kernel_size=(3, 3, 3),
+ dilation=1,
+ padding=(1, 1, 1),
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
+ self.dense = nn.Linear(
+ 64,
+ 1,
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=True)
self.loss = nn.SmoothL1Loss(reduction='none')
self.delta_scale = delta_scale
self.loss_weight = loss_weight
@@ -370,27 +491,31 @@ def forward(self, image_inputs, feature_inputs):
alpha = self.dense(x)
alpha = paddle.transpose(alpha, (0, 2, 1))
- first_img = paddle.tile(image_inputs[:, :, :1], [1, 1, window_size, 1, 1])
- last_img = paddle.tile(image_inputs[:, :, -1:], [1, 1, window_size, 1, 1])
+ first_img = paddle.tile(image_inputs[:, :, :1],
+ [1, 1, window_size, 1, 1])
+ last_img = paddle.tile(image_inputs[:, :, -1:],
+ [1, 1, window_size, 1, 1])
alpha_ = functional.sigmoid(alpha)
alpha_ = paddle.reshape(alpha_, [batch_size, 1, window_size, 1, 1])
predictions_ = (alpha_ * first_img + (1 - alpha_) * last_img)
- loss_ = self.loss(label=image_inputs / self.delta_scale, input=predictions_ / self.delta_scale)
+ loss_ = self.loss(
+ label=image_inputs / self.delta_scale,
+ input=predictions_ / self.delta_scale)
loss_ = self.loss_weight * paddle.mean(loss_)
return alpha, loss_
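ConvexCombinationRegularization learns a per-frame sigmoid weight so each frame in the window is approximated as a blend of the window's first and last frames; the blend is easy to fit within a shot and hard across a cut. A toy sketch of the blend and loss (illustrative shapes only):

```python
import paddle
import paddle.nn.functional as F

# Each frame is reconstructed as alpha * first + (1 - alpha) * last and the
# reconstruction is penalized with a Smooth L1 loss against the real frames.
B, C, T, H, W = 1, 3, 6, 4, 4
imgs = paddle.randn([B, C, T, H, W])
alpha = F.sigmoid(paddle.randn([B, 1, T, 1, 1]))   # per-frame blend weight
first = paddle.tile(imgs[:, :, :1], [1, 1, T, 1, 1])
last = paddle.tile(imgs[:, :, -1:], [1, 1, T, 1, 1])
pred = alpha * first + (1 - alpha) * last
loss = paddle.nn.SmoothL1Loss(reduction='none')(pred, imgs).mean()
```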
class ColorHistograms(nn.Layer):
- def __init__(self,
- lookup_window=101,
- output_dim=None):
+ def __init__(self, lookup_window=101, output_dim=None):
super(ColorHistograms, self).__init__()
- self.fc = nn.Linear(lookup_window, output_dim,
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=ParamAttr(
- initializer=nn.initializer.Constant(value=0.))) if output_dim is not None else None
+ self.fc = nn.Linear(
+ lookup_window,
+ output_dim,
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(
+ value=0.))) if output_dim is not None else None
self.lookup_window = lookup_window
        assert lookup_window % 2 == 1, "`lookup_window` must be an odd integer"
@@ -403,7 +528,9 @@ def get_bin(frames):
R, G, B = R // 32, G // 32, B // 32
return (R * 64) + (G * 8) + B
- batch_size = paddle.slice(frames.shape, starts=[0], ends=[1], axes=[0]) if frames.shape[0] == -1 else frames.shape[0]
+ batch_size = paddle.slice(
+ frames.shape, starts=[0], ends=[1],
+ axes=[0]) if frames.shape[0] == -1 else frames.shape[0]
time_window, height, width, no_channels = frames.shape[1:]
assert no_channels == 3 or no_channels == 6
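get_bin quantizes each RGB channel to 8 levels (// 32 on 0-255 values), giving 8 x 8 x 8 = 512 histogram bins, and the histogram itself is accumulated with scatter_nd_add. A minimal single-frame sketch:

```python
import paddle

# Quantize RGB to 8 levels per channel, map to a bin id in [0, 512), and
# count occurrences with scatter_nd_add.
rgb = paddle.randint(0, 256, [5, 3])          # 5 pixels, 0-255 values
R, G, B = rgb[:, 0] // 32, rgb[:, 1] // 32, rgb[:, 2] // 32
bins = R * 64 + G * 8 + B
hist = paddle.zeros([512], dtype='int64')
hist = paddle.scatter_nd_add(hist, bins.reshape([-1, 1]),
                             paddle.ones_like(bins))
print(int(hist.sum()))  # 5
```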
@@ -414,31 +541,47 @@ def get_bin(frames):
binned_values = get_bin(frames_flatten)
- frame_bin_prefix = (paddle.arange(0, batch_size * time_window) * 512).reshape([-1, 1])
+ frame_bin_prefix = (paddle.arange(0, batch_size * time_window) *
+ 512).reshape([-1, 1])
binned_values = (binned_values + frame_bin_prefix).reshape([-1, 1])
- histograms = paddle.zeros_like(frame_bin_prefix, dtype='int32').tile([512]).reshape([-1])
- histograms = histograms.scatter_nd_add(binned_values, paddle.ones_like(binned_values, dtype='int32').reshape([-1]))
- histograms = histograms.reshape([batch_size, time_window, 512]).astype('float32')
+ histograms = paddle.zeros_like(
+ frame_bin_prefix, dtype='int32').tile([512]).reshape([-1])
+ histograms = histograms.scatter_nd_add(
+ binned_values,
+ paddle.ones_like(
+ binned_values, dtype='int32').reshape([-1]))
+ histograms = histograms.reshape(
+ [batch_size, time_window, 512]).astype('float32')
histograms_normalized = functional.normalize(histograms, p=2, axis=2)
return histograms_normalized
def forward(self, inputs):
x = self.compute_color_histograms(inputs)
- batch_size = paddle.slice(x.shape, starts=[0], ends=[1], axes=[0]) if x.shape[0] == -1 else x.shape[0]
+ batch_size = paddle.slice(
+ x.shape, starts=[0], ends=[1],
+ axes=[0]) if x.shape[0] == -1 else x.shape[0]
time_window = x.shape[1]
- similarities = paddle.bmm(x, x.transpose([0, 2, 1])) # [batch_size, time_window, time_window]
- similarities_padded = functional.pad(similarities,
- [(self.lookup_window - 1) // 2, (self.lookup_window - 1) // 2],
- data_format='NCL')
+ similarities = paddle.bmm(x, x.transpose(
+ [0, 2, 1])) # [batch_size, time_window, time_window]
+ similarities_padded = functional.pad(
+ similarities,
+ [(self.lookup_window - 1) // 2, (self.lookup_window - 1) // 2],
+ data_format='NCL')
batch_indices = paddle.arange(0, batch_size).reshape([batch_size, 1, 1])
- batch_indices = paddle.tile(batch_indices, [1, time_window, self.lookup_window])
- time_indices = paddle.arange(0, time_window).reshape([1, time_window, 1])
- time_indices = paddle.tile(time_indices, [batch_size, 1, self.lookup_window])
- lookup_indices = paddle.arange(0, self.lookup_window).reshape([1, 1, self.lookup_window])
- lookup_indices = paddle.tile(lookup_indices, [batch_size, time_window, 1]) + time_indices
-
- indices = paddle.stack([batch_indices, time_indices, lookup_indices], -1)
+ batch_indices = paddle.tile(batch_indices,
+ [1, time_window, self.lookup_window])
+ time_indices = paddle.arange(0, time_window).reshape(
+ [1, time_window, 1])
+ time_indices = paddle.tile(time_indices,
+ [batch_size, 1, self.lookup_window])
+ lookup_indices = paddle.arange(0, self.lookup_window).reshape(
+ [1, 1, self.lookup_window])
+ lookup_indices = paddle.tile(
+ lookup_indices, [batch_size, time_window, 1]) + time_indices
+
+ indices = paddle.stack([batch_indices, time_indices, lookup_indices],
+ -1)
similarities = paddle.gather_nd(similarities_padded, indices)
if self.fc is not None:
@@ -451,8 +594,12 @@ class TransNetV2(nn.Layer):
"""TransNetV2 model from
`"TransNet V2: An effective deep network architecture for fast shot transition detection" `_
"""
+
def __init__(self,
- F=16, L=3, S=2, D=1024,
+ F=16,
+ L=3,
+ S=2,
+ D=1024,
use_many_hot_targets=True,
use_frame_similarity=True,
use_color_histograms=True,
@@ -470,20 +617,30 @@ def __init__(self,
self.std = np.array(std, np.float32).reshape([1, 3, 1, 1]) * 255
self.use_resnet_features = use_resnet_features
- self.resnet_layers = ResNetFeatures(in_filters=3, mean=self.mean, std=self.std) if self.use_resnet_features else None
+ self.resnet_layers = ResNetFeatures(
+ in_filters=3, mean=self.mean,
+ std=self.std) if self.use_resnet_features else None
self.resnet_like_top = use_resnet_like_top
if self.resnet_like_top:
- self.resnet_like_top_conv = nn.Conv3D(64 if self.use_resnet_features else 3, 32, kernel_size=(3, 7, 7),
- stride=(1, 2, 2),
- padding=(1, 3, 3),
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=False)
- self.resnet_like_top_bn = nn.BatchNorm3D(32, momentum=0.99, epsilon=1e-03,
- weight_attr=ParamAttr(
- initializer=nn.initializer.Constant(value=1.)),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
- self.resnet_like_top_max_pool = nn.MaxPool3D(kernel_size=(1, 3, 3), stride=(1, 2, 2),
- padding=(0, 1, 1))
+ self.resnet_like_top_conv = nn.Conv3D(
+ 64 if self.use_resnet_features else 3,
+ 32,
+ kernel_size=(3, 7, 7),
+ stride=(1, 2, 2),
+ padding=(1, 3, 3),
+ weight_attr=ParamAttr(
+ initializer=nn.initializer.XavierUniform()),
+ bias_attr=False)
+ self.resnet_like_top_bn = nn.BatchNorm3D(
+ 32,
+ momentum=0.99,
+ epsilon=1e-03,
+ weight_attr=ParamAttr(
+ initializer=nn.initializer.Constant(value=1.)),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(
+ value=0.)))
+ self.resnet_like_top_max_pool = nn.MaxPool3D(
+ kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
if self.resnet_like_top:
in_filters = 32
@@ -491,23 +648,32 @@ def __init__(self,
in_filters = 64
else:
in_filters = 3
- self.SDDCNN = nn.LayerList(
- [StackedDDCNNV2(in_filters=in_filters, n_blocks=S, filters=F,
- stochastic_depth_drop_prob=0.)] +
- [StackedDDCNNV2(in_filters=(F * 2 ** (i - 1)) * 4, n_blocks=S, filters=F * 2 ** i) for i in range(1, L)]
- )
+ self.SDDCNN = nn.LayerList([
+ StackedDDCNNV2(
+ in_filters=in_filters,
+ n_blocks=S,
+ filters=F,
+ stochastic_depth_drop_prob=0.)
+ ] + [
+ StackedDDCNNV2(
+ in_filters=(F * 2**(i - 1)) * 4, n_blocks=S, filters=F * 2**i)
+ for i in range(1, L)
+ ])
self.frame_sim_layer = FrameSimilarity(
- sum([(F * 2 ** i) * 4 for i in range(L)]), lookup_window=101, output_dim=128, similarity_dim=128,
- use_bias=True
- ) if use_frame_similarity else None
+ sum([(F * 2**i) * 4 for i in range(L)]),
+ lookup_window=101,
+ output_dim=128,
+ similarity_dim=128,
+ use_bias=True) if use_frame_similarity else None
self.color_hist_layer = ColorHistograms(
- lookup_window=101, output_dim=128
- ) if use_color_histograms else None
+ lookup_window=101, output_dim=128) if use_color_histograms else None
- self.dropout = nn.Dropout(dropout_rate) if dropout_rate is not None else None
+ self.dropout = nn.Dropout(
+ dropout_rate) if dropout_rate is not None else None
- output_dim = ((F * 2 ** (L - 1)) * 4) * 3 * 6 # 3x6 for spatial dimensions
+ output_dim = (
+ (F * 2**(L - 1)) * 4) * 3 * 6 # 3x6 for spatial dimensions
if use_frame_similarity: output_dim += 128
if use_color_histograms: output_dim += 128
@@ -516,22 +682,26 @@ def __init__(self,
self.has_downsample = False
if self.use_resnet_features or self.resnet_like_top or self.use_mean_pooling:
self.has_downsample = True
- self.fc1 = nn.Linear(512 if self.has_downsample else output_dim, D,
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.))
- )
+ self.fc1 = nn.Linear(
+ 512 if self.has_downsample else output_dim,
+ D,
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
self.frame_similarity_on_last_layer = frame_similarity_on_last_layer
- self.cls_layer1 = nn.Linear(1152 if self.frame_similarity_on_last_layer else D, 1,
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.))
- )
- self.cls_layer2 = nn.Linear(1152 if self.frame_similarity_on_last_layer else D, 1,
- weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
- bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.))
- ) if use_many_hot_targets else None
+ self.cls_layer1 = nn.Linear(
+ 1152 if self.frame_similarity_on_last_layer else D,
+ 1,
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(value=0.)))
+ self.cls_layer2 = nn.Linear(
+ 1152 if self.frame_similarity_on_last_layer else D,
+ 1,
+ weight_attr=ParamAttr(initializer=nn.initializer.XavierUniform()),
+ bias_attr=ParamAttr(initializer=nn.initializer.Constant(
+ value=0.))) if use_many_hot_targets else None
self.convex_comb_reg = ConvexCombinationRegularization(
- in_filters=(F * 2 ** (L - 1) * 4)) if use_convex_comb_reg else None
+ in_filters=(F * 2**(L - 1) * 4)) if use_convex_comb_reg else None
def forward(self, inputs):
assert list(inputs.shape[2:]) == [27, 48, 3] and inputs.dtype == paddle.float32, \
@@ -554,13 +724,16 @@ def forward(self, inputs):
x = block(x)
block_features.append(x)
if self.convex_comb_reg is not None:
- out_dict["alphas"], out_dict["comb_reg_loss"] = self.convex_comb_reg(inputs.transpose([0, 4, 1, 2, 3]), x)
+ out_dict["alphas"], out_dict[
+ "comb_reg_loss"] = self.convex_comb_reg(
+ inputs.transpose([0, 4, 1, 2, 3]), x)
if self.use_mean_pooling:
x = paddle.mean(x, axis=[3, 4])
x = x.transpose([0, 2, 1])
else:
x = x.transpose([0, 2, 3, 4, 1])
- x = x.reshape([x.shape[0], x.shape[1], x.shape[2]*x.shape[3]*x.shape[4]])
+ x = x.reshape(
+ [x.shape[0], x.shape[1], x.shape[2] * x.shape[3] * x.shape[4]])
if self.frame_sim_layer is not None:
x = paddle.concat([self.frame_sim_layer(block_features), x], 2)
if self.color_hist_layer is not None:
@@ -579,4 +752,3 @@ def forward(self, inputs):
return one_hot, out_dict
return one_hot
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/vit.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/vit.py
old mode 100644
new mode 100755
index 6e7217121..c158a44b7
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/vit.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/vit.py
@@ -54,6 +54,7 @@ def drop_path(x, drop_prob=0., training=False):
class DropPath(nn.Layer):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
+
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
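The drop_path function this layer wraps is the usual stochastic-depth formulation: zero a residual branch per sample with probability p during training and rescale survivors by 1/(1-p) so the expected output is unchanged. A sketch under that assumption (this hunk does not show the file's own implementation):

```python
import paddle

def drop_path_sketch(x, p=0.2, training=True):
    # Zero whole samples with probability p; scale the rest by 1/(1-p).
    if p == 0. or not training:
        return x
    keep = 1 - p
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # broadcast over each sample
    mask = paddle.floor(keep + paddle.rand(shape, dtype=x.dtype))
    return x / keep * mask

y = drop_path_sketch(paddle.randn([4, 3, 8]))
```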
@@ -155,12 +156,13 @@ def __init__(self,
raise TypeError(
"The norm_layer must be str or paddle.nn.layer.Layer class")
- self.attn = Attention(dim,
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- attn_drop=attn_drop,
- proj_drop=drop)
+ self.attn = Attention(
+ dim,
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ attn_drop=attn_drop,
+ proj_drop=drop)
# Temporal Attention Parameters
if self.attention_type == 'divided_space_time':
@@ -171,12 +173,13 @@ def __init__(self,
else:
raise TypeError(
"The norm_layer must be str or paddle.nn.layer.Layer class")
- self.temporal_attn = Attention(dim,
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- attn_drop=attn_drop,
- proj_drop=drop)
+ self.temporal_attn = Attention(
+ dim,
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ attn_drop=attn_drop,
+ proj_drop=drop)
self.temporal_fc = nn.Linear(dim, dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
@@ -243,8 +246,9 @@ def forward(self, x, B, T, W):
res = res_spatial
x = xt
- x = paddle.concat((init_cls_token, x), axis=1) + paddle.concat(
- (cls_token, res), axis=1)
+ x = paddle.concat(
+ (init_cls_token, x), axis=1) + paddle.concat(
+ (cls_token, res), axis=1)
# Mlp
x = x + self.drop_path(self.mlp(self.norm2(x)))
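The concat-plus-concat residual above keeps the class token's residual separate from the patch tokens' spatial residual and re-joins the two along the token axis before the MLP. A toy shape check (illustration only, not the full divided space-time block):

```python
import paddle

# Class token and patch tokens carry their own residuals; concatenation
# re-joins them into one [B, 1 + N, D] sequence before the add.
B, N, D = 2, 4, 8
init_cls, cls_tok = paddle.randn([B, 1, D]), paddle.randn([B, 1, D])
x, res = paddle.randn([B, N, D]), paddle.randn([B, N, D])
out = paddle.concat((init_cls, x), axis=1) + paddle.concat(
    (cls_tok, res), axis=1)
print(out.shape)  # [2, 5, 8]
```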
@@ -256,6 +260,7 @@ def forward(self, x, B, T, W):
class PatchEmbed(nn.Layer):
""" Image to Patch Embedding
"""
+
def __init__(self,
img_size=224,
patch_size=16,
@@ -270,10 +275,8 @@ def __init__(self,
self.patch_size = patch_size
self.num_patches = num_patches
- self.proj = nn.Conv2D(in_channels,
- embed_dim,
- kernel_size=patch_size,
- stride=patch_size)
+ self.proj = nn.Conv2D(
+ in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, T, H, W = x.shape
@@ -292,6 +295,7 @@ def forward(self, x):
class VisionTransformer(nn.Layer):
""" Vision Transformer with support for patch input
"""
+
def __init__(self,
pretrained=None,
img_size=224,
@@ -317,24 +321,23 @@ def __init__(self,
self.attention_type = attention_type
self.num_features = self.embed_dim = embed_dim
- self.patch_embed = PatchEmbed(img_size=img_size,
- patch_size=patch_size,
- in_channels=in_channels,
- embed_dim=embed_dim)
+ self.patch_embed = PatchEmbed(
+ img_size=img_size,
+ patch_size=patch_size,
+ in_channels=in_channels,
+ embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
# Positional Embeddings
- self.cls_token = self.create_parameter(shape=(1, 1, embed_dim),
- default_initializer=zeros_)
- self.pos_embed = self.create_parameter(shape=(1, num_patches + 1,
- embed_dim),
- default_initializer=zeros_)
+ self.cls_token = self.create_parameter(
+ shape=(1, 1, embed_dim), default_initializer=zeros_)
+ self.pos_embed = self.create_parameter(
+ shape=(1, num_patches + 1, embed_dim), default_initializer=zeros_)
self.pos_drop = nn.Dropout(p=drop_rate)
if self.attention_type != 'space_only':
- self.time_embed = self.create_parameter(shape=(1, num_seg,
- embed_dim),
- default_initializer=zeros_)
+ self.time_embed = self.create_parameter(
+ shape=(1, num_seg, embed_dim), default_initializer=zeros_)
self.time_drop = nn.Dropout(p=drop_rate)
self.add_parameter("pos_embed", self.pos_embed)
@@ -343,17 +346,18 @@ def __init__(self,
dpr = np.linspace(0, drop_path_rate, depth)
self.blocks = nn.LayerList([
- Block(dim=embed_dim,
- num_heads=num_heads,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[i],
- norm_layer=norm_layer,
- epsilon=epsilon,
- attention_type=self.attention_type) for i in range(depth)
+ Block(
+ dim=embed_dim,
+ num_heads=num_heads,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ drop=drop_rate,
+ attn_drop=attn_drop_rate,
+ drop_path=dpr[i],
+ norm_layer=norm_layer,
+ epsilon=epsilon,
+ attention_type=self.attention_type) for i in range(depth)
])
self.norm = eval(norm_layer)(embed_dim, epsilon=epsilon)
@@ -374,14 +378,14 @@ def init_weights(self):
zeros_(m.temporal_fc.bias)
i += 1
"""Second, if provide pretrained ckpt, load it"""
- if isinstance(
- self.pretrained, str
- ) and self.pretrained.strip() != "": # load pretrained weights
- load_ckpt(self,
- self.pretrained,
- num_patches=self.patch_embed.num_patches,
- num_seg=self.num_seg,
- attention_type=self.attention_type)
+ if isinstance(self.pretrained, str) and self.pretrained.strip(
+ ) != "": # load pretrained weights
+ load_ckpt(
+ self,
+ self.pretrained,
+ num_patches=self.patch_embed.num_patches,
+ num_seg=self.num_seg,
+ attention_type=self.attention_type)
def _init_fn(self, m):
if isinstance(m, nn.Linear):
@@ -407,13 +411,12 @@ def forward_features(self, x):
P = int(other_pos_embed.shape[2]**0.5)
H = x.shape[1] // W
other_pos_embed = other_pos_embed.reshape([1, x.shape[2], P, P])
- new_pos_embed = F.interpolate(other_pos_embed,
- size=(H, W),
- mode='nearest')
+ new_pos_embed = F.interpolate(
+ other_pos_embed, size=(H, W), mode='nearest')
new_pos_embed = new_pos_embed.flatten(2)
new_pos_embed = new_pos_embed.transpose((0, 2, 1))
- new_pos_embed = paddle.concat((cls_pos_embed, new_pos_embed),
- axis=1)
+ new_pos_embed = paddle.concat(
+ (cls_pos_embed, new_pos_embed), axis=1)
x = x + new_pos_embed
else:
x = x + self.pos_embed
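When the token count changes (a different input resolution), the learned positional embedding is resized as above: the non-cls part is reshaped to its original P x P grid, interpolated to the new H x W grid, flattened back, and re-joined with the cls position. A self-contained sketch:

```python
import paddle
import paddle.nn.functional as F

# Resize a learned [1, 1 + P*P, D] position embedding to a new token grid.
D, P, H, W = 8, 4, 6, 6
pos = paddle.randn([1, 1 + P * P, D])
cls_pos, grid = pos[:, :1], pos[:, 1:]
grid = grid.transpose((0, 2, 1)).reshape([1, D, P, P])
grid = F.interpolate(grid, size=(H, W), mode='nearest')
grid = grid.flatten(2).transpose((0, 2, 1))
new_pos = paddle.concat((cls_pos, grid), axis=1)  # [1, 1 + H*W, D]
print(new_pos.shape)
```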
@@ -422,8 +425,9 @@ def forward_features(self, x):
# Time Embeddings
if self.attention_type != 'space_only':
- cls_tokens = x[:B, 0, :].unsqueeze(1) if B > 0 else x.split(
- T)[0].index_select(paddle.to_tensor([0]), axis=1)
+ cls_tokens = x[:B, 0, :].unsqueeze(1) if B > 0 else x.split(T)[
+ 0].index_select(
+ paddle.to_tensor([0]), axis=1)
x = x[:, 1:]
_, _n, _m = x.shape
_t = T
@@ -433,9 +437,9 @@ def forward_features(self, x):
time_interp = (T != self.time_embed.shape[1])
if time_interp: # T' != T
time_embed = self.time_embed.transpose((0, 2, 1)).unsqueeze(0)
- new_time_embed = F.interpolate(time_embed,
- size=(T, x.shape[-1]),
- mode='nearest').squeeze(0)
+ new_time_embed = F.interpolate(
+ time_embed, size=(T, x.shape[-1]),
+ mode='nearest').squeeze(0)
new_time_embed = new_time_embed.transpose((0, 2, 1))
x = x + new_time_embed
else:
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/vit_tweaks.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/vit_tweaks.py
old mode 100644
new mode 100755
index 111697a02..08b30bc0e
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/vit_tweaks.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/backbones/vit_tweaks.py
@@ -76,6 +76,7 @@ def drop_path(x, drop_prob=0., training=False):
class DropPath(nn.Layer):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
+
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
@@ -182,14 +183,15 @@ def __init__(self,
raise TypeError(
"The norm_layer must be str or paddle.nn.layer.Layer class")
- self.attn = Attention(dim,
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- attn_drop=attn_drop,
- proj_drop=drop,
- wd_bias=wd_bias,
- lr_mult=lr_mult)
+ self.attn = Attention(
+ dim,
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ attn_drop=attn_drop,
+ proj_drop=drop,
+ wd_bias=wd_bias,
+ lr_mult=lr_mult)
# Temporal Attention Parameters
if self.attention_type == 'divided_space_time':
@@ -200,14 +202,15 @@ def __init__(self,
else:
raise TypeError(
"The norm_layer must be str or paddle.nn.layer.Layer class")
- self.temporal_attn = Attention(dim,
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- attn_drop=attn_drop,
- proj_drop=drop,
- wd_bias=wd_bias,
- lr_mult=lr_mult)
+ self.temporal_attn = Attention(
+ dim,
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ attn_drop=attn_drop,
+ proj_drop=drop,
+ wd_bias=wd_bias,
+ lr_mult=lr_mult)
self.temporal_fc = nn.Linear(dim, dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
@@ -277,8 +280,9 @@ def forward(self, x, B, T, W):
res = res_spatial
x = xt
- x = paddle.concat((init_cls_token, x), axis=1) + paddle.concat(
- (cls_token, res), axis=1)
+ x = paddle.concat(
+ (init_cls_token, x), axis=1) + paddle.concat(
+ (cls_token, res), axis=1)
# Mlp
x = x + self.drop_path(self.mlp(self.norm2(x)))
@@ -290,6 +294,7 @@ def forward(self, x, B, T, W):
class PatchEmbed(nn.Layer):
""" Image to Patch Embedding
"""
+
def __init__(self,
img_size=224,
patch_size=16,
@@ -306,10 +311,8 @@ def __init__(self,
self.patch_size = patch_size
self.num_patches = num_patches
- self.proj = nn.Conv2D(in_channels,
- embed_dim,
- kernel_size=patch_size,
- stride=patch_size)
+ self.proj = nn.Conv2D(
+ in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, T, H, W = x.shape
@@ -328,6 +331,7 @@ def forward(self, x):
class VisionTransformer_tweaks(nn.Layer):
""" Vision Transformer with support for patch input
"""
+
def __init__(self,
pretrained=None,
img_size=224,
@@ -356,12 +360,13 @@ def __init__(self,
self.lr_mult_list = lr_mult_list
self.num_features = self.embed_dim = embed_dim
- self.patch_embed = PatchEmbed(img_size=img_size,
- patch_size=patch_size,
- in_channels=in_channels,
- embed_dim=embed_dim,
- wd_bias=wd_bias,
- lr_mult=self.lr_mult_list[0])
+ self.patch_embed = PatchEmbed(
+ img_size=img_size,
+ patch_size=patch_size,
+ in_channels=in_channels,
+ embed_dim=embed_dim,
+ wd_bias=wd_bias,
+ lr_mult=self.lr_mult_list[0])
num_patches = self.patch_embed.num_patches
# Positional Embeddings
@@ -388,19 +393,20 @@ def __init__(self,
dpr = np.linspace(0, drop_path_rate, depth)
self.blocks = nn.LayerList([
- Block(dim=embed_dim,
- num_heads=num_heads,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[i],
- norm_layer=norm_layer,
- epsilon=epsilon,
- attention_type=self.attention_type,
- wd_bias=wd_bias,
- lr_mult=self.lr_mult_list[(i // 4) + 1]) for i in range(depth)
+ Block(
+ dim=embed_dim,
+ num_heads=num_heads,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ drop=drop_rate,
+ attn_drop=attn_drop_rate,
+ drop_path=dpr[i],
+ norm_layer=norm_layer,
+ epsilon=epsilon,
+ attention_type=self.attention_type,
+ wd_bias=wd_bias,
+ lr_mult=self.lr_mult_list[(i // 4) + 1]) for i in range(depth)
])
self.norm = eval(norm_layer)(embed_dim, epsilon=epsilon)
@@ -421,14 +427,14 @@ def init_weights(self):
zeros_(m.temporal_fc.bias)
i += 1
"""Second, if provide pretrained ckpt, load it"""
- if isinstance(
- self.pretrained, str
- ) and self.pretrained.strip() != "": # load pretrained weights
- load_ckpt(self,
- self.pretrained,
- num_patches=self.patch_embed.num_patches,
- num_seg=self.num_seg,
- attention_type=self.attention_type)
+ if isinstance(self.pretrained, str) and self.pretrained.strip(
+ ) != "": # load pretrained weights
+ load_ckpt(
+ self,
+ self.pretrained,
+ num_patches=self.patch_embed.num_patches,
+ num_seg=self.num_seg,
+ attention_type=self.attention_type)
elif self.pretrained is None or self.pretrained.strip() == "":
pass
else:
@@ -447,7 +453,8 @@ def forward_features(self, x):
# B = x.shape[0]
B = paddle.shape(x)[0]
x, T, W = self.patch_embed(x) # [BT,nH*nW,F]
- cls_tokens = self.cls_token.expand((B * T, -1, -1)) # [1,1,F]->[BT,1,F]
+ cls_tokens = self.cls_token.expand(
+ (B * T, -1, -1)) # [1,1,F]->[BT,1,F]
x = paddle.concat((cls_tokens, x), axis=1)
pos_interp = (x.shape[1] != self.pos_embed.shape[1])
if pos_interp:
@@ -458,13 +465,12 @@ def forward_features(self, x):
P = int(other_pos_embed.shape[2]**0.5)
H = x.shape[1] // W
other_pos_embed = other_pos_embed.reshape([1, x.shape[2], P, P])
- new_pos_embed = F.interpolate(other_pos_embed,
- size=(H, W),
- mode='nearest')
+ new_pos_embed = F.interpolate(
+ other_pos_embed, size=(H, W), mode='nearest')
new_pos_embed = new_pos_embed.flatten(2)
new_pos_embed = new_pos_embed.transpose((0, 2, 1))
- new_pos_embed = paddle.concat((cls_pos_embed, new_pos_embed),
- axis=1)
+ new_pos_embed = paddle.concat(
+ (cls_pos_embed, new_pos_embed), axis=1)
x = x + new_pos_embed
else:
x = x + self.pos_embed
@@ -473,8 +479,9 @@ def forward_features(self, x):
# Time Embeddings
if self.attention_type != 'space_only':
- cls_tokens = x[:B, 0, :].unsqueeze(1) if B > 0 else x.split(
- T)[0].index_select(paddle.to_tensor([0]), axis=1)
+ cls_tokens = x[:B, 0, :].unsqueeze(1) if B > 0 else x.split(T)[
+ 0].index_select(
+ paddle.to_tensor([0]), axis=1)
x = x[:, 1:]
_, _n, _m = x.shape
_t = T
@@ -484,9 +491,9 @@ def forward_features(self, x):
time_interp = (T != self.time_embed.shape[1])
if time_interp: # T' != T
time_embed = self.time_embed.transpose((0, 2, 1)).unsqueeze(0)
- new_time_embed = F.interpolate(time_embed,
- size=(T, x.shape[-1]),
- mode='nearest').squeeze(0)
+ new_time_embed = F.interpolate(
+ time_embed, size=(T, x.shape[-1]),
+ mode='nearest').squeeze(0)
new_time_embed = new_time_embed.transpose((0, 2, 1))
x = x + new_time_embed
else:
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/bbox_utils.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/bbox_utils.py
old mode 100644
new mode 100755
index 23b4555b4..76579f817
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/bbox_utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/bbox_utils.py
@@ -169,7 +169,8 @@ def decode_yolo(box, anchor, downsample_ratio):
"""
x, y, w, h = box
na, grid_h, grid_w = x.shape[1:4]
- grid = make_grid(grid_h, grid_w, x.dtype).reshape((1, 1, grid_h, grid_w, 2))
+ grid = make_grid(grid_h, grid_w, x.dtype).reshape(
+ (1, 1, grid_h, grid_w, 2))
x1 = (x + grid[:, :, :, :, 0:1]) / grid_w
y1 = (y + grid[:, :, :, :, 1:2]) / grid_h
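decode_yolo adds each cell's integer grid index to the predicted offset and normalizes by the grid size, yielding box centers in [0, 1]. A sketch, assuming make_grid lays out (x, y) per cell as the 0:1 / 1:2 indexing above suggests:

```python
import paddle

# Build a [1, 1, gh, gw, 2] grid of (x, y) cell indices and decode a zero
# offset into normalized cell-center coordinates.
gh, gw = 2, 3
ys, xs = paddle.meshgrid(paddle.arange(gh), paddle.arange(gw))
grid = paddle.stack([xs, ys], axis=-1).reshape(
    [1, 1, gh, gw, 2]).astype('float32')
x = paddle.zeros([1, 1, gh, gw, 1])            # predicted x offsets
cx = (x + grid[:, :, :, :, 0:1]) / gw          # normalized centers
print(cx.shape)
```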
@@ -248,7 +249,8 @@ def bbox_iou(box1, box2, giou=False, diou=False, ciou=False, eps=1e-9):
# convex diagonal squared
c2 = cw**2 + ch**2 + eps
# center distance
- rho2 = ((px1 + px2 - gx1 - gx2)**2 + (py1 + py2 - gy1 - gy2)**2) / 4
+ rho2 = (
+ (px1 + px2 - gx1 - gx2)**2 + (py1 + py2 - gy1 - gy2)**2) / 4
if diou:
return iou - rho2 / c2
else:
@@ -363,8 +365,8 @@ def rbox2delta(proposals, gt, means=[0, 0, 0, 0, 0], stds=[1, 1, 1, 1, 1]):
coord = gt[..., 0:2] - proposals[..., 0:2]
dx = (np.cos(proposals[..., 4]) * coord[..., 0] + np.sin(proposals[..., 4])
* coord[..., 1]) / proposals_widths
- dy = (-np.sin(proposals[..., 4]) * coord[..., 0] + np.cos(proposals[..., 4])
- * coord[..., 1]) / proposals_heights
+ dy = (-np.sin(proposals[..., 4]) * coord[..., 0] +
+ np.cos(proposals[..., 4]) * coord[..., 1]) / proposals_heights
dw = np.log(gt_widths / proposals_widths)
dh = np.log(gt_heights / proposals_heights)
da = (gt_angle - proposals_angle)
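The reflowed dx/dy lines form a rotation of the center offset into the proposal's own frame: dx is the projection onto the box's x-axis and dy onto its y-axis, each normalized by the proposal's width or height. A small numeric sketch of that projection:

```python
import numpy as np

# Rotate the gt-minus-proposal center offset by -theta so the deltas are
# expressed along the rotated proposal's own axes.
theta = np.pi / 6
coord = np.array([2.0, 1.0])  # gt center minus proposal center
dx = np.cos(theta) * coord[0] + np.sin(theta) * coord[1]   # along box x-axis
dy = -np.sin(theta) * coord[0] + np.cos(theta) * coord[1]  # along box y-axis
print(dx, dy)
```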
@@ -455,7 +457,8 @@ def norm_angle(angle, range=[-np.pi / 4, np.pi]):
def cal_line_length(point1, point2):
import math
return math.sqrt(
- math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2))
+ math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1],
+ 2))
def get_best_begin_point_single(coordinate):
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/builder.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/builder.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/base.py
old mode 100644
new mode 100755
index 4d5ccb8fe..7f9412874
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/base.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/base.py
@@ -3,16 +3,18 @@
import paddle.nn as nn
from ...registry import DETECTORS
+
@DETECTORS.register()
class BaseDetector(nn.Layer):
"""Base class for detectors. """
+
def __init__(self, backbone=None, head=None):
super().__init__()
def init_weights(self):
"""Initialize the model network weights. """
- self.backbone.init_weights()
+ self.backbone.init_weights()
self.head.init_weights()
def extract_feature(self, imgs, iter_num):
@@ -20,7 +22,7 @@ def extract_feature(self, imgs, iter_num):
feature = self.backbone(imgs)
return feature
- def forward(self, data_batch, mode='infer'):
+ def forward(self, data_batch, mode='infer'):
if mode == 'train':
return self.train_step(data_batch)
elif mode == 'valid':
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/fast_rcnn.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/fast_rcnn.py
old mode 100644
new mode 100755
index e8f912dbe..8fb86b9fd
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/fast_rcnn.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/fast_rcnn.py
@@ -15,9 +15,9 @@
from .two_stage import TwoStageDetector
from ...registry import DETECTORS
+
@DETECTORS.register()
class FastRCNN(TwoStageDetector):
-
def __init__(self,
backbone,
head=None,
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/two_stage.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/two_stage.py
old mode 100644
new mode 100755
index f9deb1d0f..1186daa2c
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/two_stage.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/detectors/two_stage.py
@@ -102,10 +102,8 @@ def val_step(self, data, rescale=False):
img_metas = scores, entity_ids
x = self.extract_feat(img=[img_slow, img_fast])
- return self.roi_head.simple_test(x,
- proposals[0],
- img_shape,
- rescale=rescale)
+ return self.roi_head.simple_test(
+ x, proposals[0], img_shape, rescale=rescale)
def test_step(self, data, rescale=False):
return self.val_step(data, rescale)
@@ -121,10 +119,8 @@ def infer_step(self, data, rescale=False):
# using slowfast model to extract spatio-temporal features
x = self.extract_feat(img=[img_slow, img_fast])
- ret = self.roi_head.simple_test(x,
- proposals[0],
- img_shape,
- rescale=rescale)
+ ret = self.roi_head.simple_test(
+ x, proposals[0], img_shape, rescale=rescale)
return ret
def get_unpad_datas(self, data):
@@ -148,25 +144,22 @@ def get_unpad_datas(self, data):
pad_proposal = pad_proposals[bi]
len_proposal = len_proposals[bi]
index_proposal = paddle.arange(len_proposal)
- proposal = paddle.index_select(x=pad_proposal,
- index=index_proposal,
- axis=0)
+ proposal = paddle.index_select(
+ x=pad_proposal, index=index_proposal, axis=0)
proposals.append(proposal)
pad_gt_bbox = pad_gt_bboxes[bi]
len_gt_bbox = len_gt_bboxes[bi]
index_gt_bbox = paddle.arange(len_gt_bbox)
- gt_bbox = paddle.index_select(x=pad_gt_bbox,
- index=index_gt_bbox,
- axis=0)
+ gt_bbox = paddle.index_select(
+ x=pad_gt_bbox, index=index_gt_bbox, axis=0)
gt_bboxes.append(gt_bbox)
pad_gt_label = pad_gt_labels[bi]
len_gt_label = len_gt_labels[bi]
index_gt_label = paddle.arange(len_gt_label)
- gt_label = paddle.index_select(x=pad_gt_label,
- index=index_gt_label,
- axis=0)
+ gt_label = paddle.index_select(
+ x=pad_gt_label, index=index_gt_label, axis=0)
gt_labels.append(gt_label)
pad_score = pad_scores[bi]
@@ -178,9 +171,8 @@ def get_unpad_datas(self, data):
pad_entity_id = pad_entity_ids[bi]
len_entity_id = len_entity_ids[bi]
index_entity_id = paddle.arange(len_entity_id)
- entity_id = paddle.index_select(x=pad_entity_id,
- index=index_entity_id,
- axis=0)
+ entity_id = paddle.index_select(
+ x=pad_entity_id, index=index_entity_id, axis=0)
entity_ids.append(entity_id)
return proposals, gt_bboxes, gt_labels, scores, entity_ids
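get_unpad_datas repeats one idiom for every field: a padded batch entry is trimmed back to its true length with arange plus index_select along axis 0. A minimal sketch:

```python
import paddle

# Keep only the first `true_len` rows of a padded [max_len, 4] entry.
padded = paddle.randn([8, 4])     # padded to 8 proposals
true_len = 5
trimmed = paddle.index_select(padded, paddle.arange(true_len), axis=0)
print(trimmed.shape)  # [5, 4]
```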
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/estimators/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/estimators/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/estimators/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/estimators/base.py
old mode 100644
new mode 100755
index cdddd674f..dd145ab62
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/estimators/base.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/estimators/base.py
@@ -27,6 +27,7 @@ class BaseEstimator(nn.Layer):
"""BaseEstimator
"""
+
def __init__(self, backbone=None, head=None):
super().__init__()
if backbone is not None:
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/estimators/depth_estimator.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/estimators/depth_estimator.py
old mode 100644
new mode 100755
index 13ee87775..0423420fd
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/estimators/depth_estimator.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/estimators/depth_estimator.py
@@ -24,6 +24,7 @@
class DepthEstimator(BaseEstimator):
"""DepthEstimator
"""
+
def forward_net(self, inputs, day_or_night='day_and_night'):
if self.backbone is not None:
outputs = self.backbone(inputs, day_or_night)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/localizers/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/localizers/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/localizers/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/localizers/base.py
old mode 100644
new mode 100755
index cfd2869f6..9f1188b64
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/localizers/base.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/localizers/base.py
@@ -25,6 +25,7 @@ class BaseLocalizer(nn.Layer):
- Methods:``valid_step``, define your valid step, always the same as train_step.
- Methods:``test_step``, define your test step.
"""
+
def __init__(self, backbone, loss):
super().__init__()
self.backbone = builder.build_backbone(backbone)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/localizers/bmn_localizer.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/localizers/bmn_localizer.py
old mode 100644
new mode 100755
index 5afbd3a0c..d8c049bfd
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/localizers/bmn_localizer.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/localizers/bmn_localizer.py
@@ -20,6 +20,7 @@
class BMNLocalizer(BaseLocalizer):
"""BMN Localization framework
"""
+
def forward_net(self, imgs):
"""Call backbone forward.
"""
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/actbert.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/actbert.py
old mode 100644
new mode 100755
index 4f2c074ff..f7e86cf50
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/actbert.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/actbert.py
@@ -21,6 +21,7 @@
@MULTIMODAL.register()
class ActBert(BaseMultimodal):
"""ActBert model framework."""
+
def forward_net(self, text_ids, action_feat, image_feat, image_loc,
token_type_ids, text_mask, image_mask, action_mask):
pred = self.backbone(text_ids, action_feat, image_feat, image_loc,
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/base.py
old mode 100644
new mode 100755
index bc57f9765..a7a943c44
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/base.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/base.py
@@ -19,6 +19,7 @@ class BaseMultimodal(nn.Layer):
loss(dict): Loss function.
"""
+
def __init__(self, backbone=None, head=None, loss=None):
super().__init__()
if backbone is not None:
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/base.py
old mode 100644
new mode 100755
index a7c925975..f0d96742a
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/base.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/base.py
@@ -25,6 +25,7 @@ class BasePartitioner(nn.Layer):
    - Methods:``valid_step``, define your validation step, always the same as ``train_step``.
- Methods:``test_step``, define your test step.
"""
+
def __init__(self, backbone=None, head=None):
super().__init__()
if backbone is not None:
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/transnetv2_partitioner.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/transnetv2_partitioner.py
old mode 100644
new mode 100755
index c3295068c..f7827ed14
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/transnetv2_partitioner.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/transnetv2_partitioner.py
@@ -20,6 +20,7 @@
class TransNetV2Partitioner(BasePartitioner):
"""TransNetV2 Partitioner framework
"""
+
def forward_net(self, imgs):
one_hot_pred = self.backbone(imgs)
return one_hot_pred
@@ -35,9 +36,12 @@ def train_step(self, data_batch):
one_hot_pred, dict_ = one_hot_pred
many_hot_pred = dict_.get("many_hot", None)
comb_reg_loss = dict_.get("comb_reg_loss", None)
- loss_metrics = self.head.loss(one_hot_pred, one_hot_gt,
- many_hot_pred, many_hot_gt,
- reg_losses={"comb_reg": comb_reg_loss})
+ loss_metrics = self.head.loss(
+ one_hot_pred,
+ one_hot_gt,
+ many_hot_pred,
+ many_hot_gt,
+ reg_losses={"comb_reg": comb_reg_loss})
return loss_metrics
def val_step(self, data_batch):
@@ -49,9 +53,12 @@ def val_step(self, data_batch):
one_hot_pred, dict_ = one_hot_pred
many_hot_pred = dict_.get("many_hot", None)
comb_reg_loss = dict_.get("comb_reg_loss", None)
- loss_metrics = self.head.loss(one_hot_pred, one_hot_gt,
- many_hot_pred, many_hot_gt,
- reg_losses={"comb_reg": comb_reg_loss})
+ loss_metrics = self.head.loss(
+ one_hot_pred,
+ one_hot_gt,
+ many_hot_pred,
+ many_hot_gt,
+ reg_losses={"comb_reg": comb_reg_loss})
return loss_metrics
def test_step(self, data_batch):
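Both `train_step` and `val_step` above tolerate a backbone that returns either a plain prediction or a `(prediction, dict)` pair carrying optional extras. A self-contained sketch of that unpacking, with dummy tensors standing in for the real TransNetV2 outputs (the `isinstance` guard is an assumption, since the start of the method is not shown in this hunk):

```python
import paddle

# Dummy stand-ins for the backbone outputs; shapes are illustrative only.
logits = paddle.randn([2, 100, 2])
extras = {"many_hot": paddle.randn([2, 100, 2]),
          "comb_reg_loss": paddle.to_tensor(0.1)}
one_hot_pred = (logits, extras)

if isinstance(one_hot_pred, tuple):  # backbone returned auxiliary outputs
    one_hot_pred, dict_ = one_hot_pred
    many_hot_pred = dict_.get("many_hot", None)       # optional auxiliary head
    comb_reg_loss = dict_.get("comb_reg_loss", None)  # optional regularizer
print(one_hot_pred.shape, many_hot_pred.shape)
```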
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/base.py
old mode 100644
new mode 100755
index bf31caf04..8dfa99534
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/base.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/base.py
@@ -18,6 +18,7 @@ class BaseRecognizer(nn.Layer):
head (dict): Classification head to process feature.
"""
+
def __init__(self, backbone=None, head=None, runtime_cfg=None):
super().__init__()
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer1d.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer1d.py
old mode 100644
new mode 100755
index 3927b181e..a98ebbc69
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer1d.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer1d.py
@@ -17,6 +17,7 @@
@RECOGNIZERS.register()
class Recognizer1D(BaseRecognizer):
"""1D recognizer model framework."""
+
def forward_net(self, imgs):
"""Define how the model is going to train, from input to output.
"""
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer2d.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer2d.py
old mode 100644
new mode 100755
index 7b1118fd8..fdaf97229
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer2d.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer2d.py
@@ -21,9 +21,11 @@
@RECOGNIZERS.register()
class Recognizer2D(BaseRecognizer):
"""2D recognizer model framework."""
+
def __init__(self, backbone=None, head=None):
super().__init__(backbone=backbone, head=head)
- self.avgpool2d = paddle.nn.AdaptiveAvgPool2D((1, 1), data_format='NCHW')
+ self.avgpool2d = paddle.nn.AdaptiveAvgPool2D(
+ (1, 1), data_format='NCHW')
def forward_net(self, imgs):
        # NOTE: num_segs is an attribute of the dataset phase and is not passed to the build_head phase, so it must be recovered from imgs (a paddle.Tensor) here before calling self.head.
@@ -78,4 +80,4 @@ def infer_step(self, data_batch):
imgs = paddle.reshape_(imgs, [-1] + list(imgs.shape[2:]))
feature = self.backbone(imgs)
feat = self.avgpool2d(feature)
- return feat
+ return feat
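The NOTE in `forward_net` explains why `num_segs` has to be recovered from the input tensor itself: segments are folded into the batch axis before the 2D backbone and unfolded again in the head. A minimal sketch of that folding plus the adaptive pooling used in `infer_step` (the conv layer is a stand-in for the real backbone):

```python
import paddle

N, num_segs, C, H, W = 2, 8, 3, 32, 32
imgs = paddle.randn([N, num_segs, C, H, W])
num_segs = imgs.shape[1]                               # recovered from the tensor

x = paddle.reshape(imgs, [-1] + list(imgs.shape[2:]))  # [N*num_segs, C, H, W]
backbone = paddle.nn.Conv2D(C, 16, kernel_size=3, padding=1)  # stand-in backbone
feature = backbone(x)

avgpool2d = paddle.nn.AdaptiveAvgPool2D((1, 1), data_format='NCHW')
feat = avgpool2d(feature)                              # [N*num_segs, 16, 1, 1]
print(feat.shape)
```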
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer3d.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer3d.py
old mode 100644
new mode 100755
index 9fdabf58c..362869cc3
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer3d.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer3d.py
@@ -21,6 +21,7 @@
class Recognizer3D(BaseRecognizer):
"""3D Recognizer model framework.
"""
+
def forward_net(self, imgs):
"""Define how the model is going to run, from input to output.
"""
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer3dMRI.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer3dMRI.py
old mode 100644
new mode 100755
index 9298491c0..03e039c5f
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer3dMRI.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer3dMRI.py
@@ -22,6 +22,7 @@
class Recognizer3DMRI(BaseRecognizer):
"""3D Recognizer model framework.
"""
+
def forward_net(self, imgs):
"""Define how the model is going to run, from input to output.
"""
@@ -56,10 +57,8 @@ def val_step(self, data_batch):
# call forward
cls_score = self.forward_net(imgs)
cls_score = paddle.nn.functional.sigmoid(cls_score)
- loss_metrics = self.head.loss(cls_score,
- labels,
- valid_mode=True,
- if_top5=False)
+ loss_metrics = self.head.loss(
+ cls_score, labels, valid_mode=True, if_top5=False)
return loss_metrics
def test_step(self, data_batch):
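`val_step` activates the logits with a sigmoid before handing them to the head's loss, i.e. each class is scored independently rather than through a softmax. A sketch of the pairing that activation implies — the actual `head.loss` internals are not shown in this patch, so the binary cross-entropy here is an assumption:

```python
import paddle
import paddle.nn.functional as F

cls_score = F.sigmoid(paddle.randn([4, 2]))       # independent class probabilities
labels = paddle.to_tensor([[1., 0.], [0., 1.],
                           [0., 1.], [1., 0.]])
loss = F.binary_cross_entropy(cls_score, labels)  # assumed loss pairing
print(float(loss))
```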
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizerMRI.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizerMRI.py
old mode 100644
new mode 100755
index 4b1713e61..98b8e1a14
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizerMRI.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizerMRI.py
@@ -21,6 +21,7 @@
@RECOGNIZERS.register()
class RecognizerMRI(BaseRecognizer):
"""2D recognizer model framework."""
+
def forward_net(self, imgs):
        # NOTE: num_segs is an attribute of the dataset phase and is not passed to the build_head phase, so it must be recovered from imgs (a paddle.Tensor) here before calling self.head.
num_segs = imgs.shape[
@@ -56,10 +57,8 @@ def val_step(self, data_batch):
labels = data_batch[1:]
cls_score = self.forward_net(imgs)
cls_score = paddle.nn.functional.sigmoid(cls_score)
- loss_metrics = self.head.loss(cls_score,
- labels,
- valid_mode=True,
- if_top5=False)
+ loss_metrics = self.head.loss(
+ cls_score, labels, valid_mode=True, if_top5=False)
return loss_metrics
def test_step(self, data_batch):
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_gcn.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_gcn.py
old mode 100644
new mode 100755
index 4dd974900..cebf73aac
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_gcn.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_gcn.py
@@ -21,6 +21,7 @@
class RecognizerGCN(BaseRecognizer):
"""GCN Recognizer model framework.
"""
+
def forward_net(self, data):
"""Define how the model is going to run, from input to output.
"""
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_transformer.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_transformer.py
old mode 100644
new mode 100755
index 4144edacf..9ceee2e06
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_transformer.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_transformer.py
@@ -23,6 +23,7 @@
@RECOGNIZERS.register()
class RecognizerTransformer(BaseRecognizer):
"""Transformer's recognizer model framework."""
+
def forward_net(self, imgs):
# imgs.shape=[N,C,T,H,W], for transformer case
if self.backbone is not None:
@@ -92,7 +93,7 @@ def _average_view(self, cls_score, avg_type='score'):
return paddle.add_n(cls_score) / len(cls_score)
elif avg_type == 'prob':
return paddle.add_n(
- [F.softmax(score, axis=-1)
- for score in cls_score]) / len(cls_score)
+ [F.softmax(
+ score, axis=-1) for score in cls_score]) / len(cls_score)
else:
raise NotImplementedError
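`_average_view` offers two ways to fuse multi-view predictions: average the raw scores, or average after a softmax. A runnable sketch with random scores standing in for real views:

```python
import paddle
import paddle.nn.functional as F

cls_score = [paddle.randn([4, 10]) for _ in range(3)]  # 3 views, 10 classes

avg_score = paddle.add_n(cls_score) / len(cls_score)   # avg_type='score'
avg_prob = paddle.add_n(
    [F.softmax(s, axis=-1) for s in cls_score]) / len(cls_score)  # avg_type='prob'
print(avg_score.shape, avg_prob.shape)
```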
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_transformer_MRI.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_transformer_MRI.py
old mode 100644
new mode 100755
index e8696b4da..4222be15f
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_transformer_MRI.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/recognizers/recognizer_transformer_MRI.py
@@ -23,6 +23,7 @@
@RECOGNIZERS.register()
class RecognizerTransformer_MRI(BaseRecognizer):
"""Transformer's recognizer model framework."""
+
def forward_net(self, imgs):
# imgs.shape=[N,C,T,H,W], for transformer case
@@ -56,10 +57,8 @@ def val_step(self, data_batch):
labels = data_batch[1:]
cls_score = self.forward_net(imgs)
cls_score = paddle.nn.functional.sigmoid(cls_score)
- loss_metrics = self.head.loss(cls_score,
- labels,
- valid_mode=True,
- if_top5=False)
+ loss_metrics = self.head.loss(
+ cls_score, labels, valid_mode=True, if_top5=False)
return loss_metrics
def test_step(self, data_batch):
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/base.py
old mode 100644
new mode 100755
index 0c5cb07f7..cb140c950
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/base.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/base.py
@@ -28,6 +28,7 @@ class BaseSegment(nn.Layer):
head (dict): Head to process feature.
loss(dict): Loss function.
"""
+
def __init__(self, backbone=None, head=None, loss=None):
super().__init__()
if backbone is not None:
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/cfbi.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/cfbi.py
old mode 100644
new mode 100755
index dcdc512f0..046f214c0
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/cfbi.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/cfbi.py
@@ -26,6 +26,7 @@
@SEGMENT.register()
class CFBI(BaseSegment):
"""CFBI model framework."""
+
def __init__(self, backbone=None, head=None, loss=None):
super().__init__(backbone, head, loss)
x1 = paddle.zeros([3, 1, 1, 1])
@@ -64,16 +65,16 @@ def test_step(self, data_batch):
ref_masks,
prev_mask,
gt_ids,
- current_low_level=current_low_level,
- )
+ current_low_level=current_low_level, )
all_pred = []
for i in range(bs):
pred = tmp_dic[i]
- pred = F.interpolate(pred,
- size=[pred_size[0], pred_size[1]],
- mode='bilinear',
- align_corners=True)
+ pred = F.interpolate(
+ pred,
+ size=[pred_size[0], pred_size[1]],
+ mode='bilinear',
+ align_corners=True)
all_pred.append(pred)
all_pred = paddle.concat(all_pred, axis=0)
all_pred = F.softmax(all_pred, axis=1)
@@ -113,24 +114,28 @@ def before_seghead_process(self,
ref_frame_embeddings = list(zip(*ref_frame_embeddings))
all_scale_ref_frame_label = []
for ref_frame_label in ref_frame_labels:
- scale_ref_frame_label = paddle.cast(F.interpolate(
- paddle.cast(ref_frame_label, dtype="float32"),
- size=(h, w),
- mode='nearest'),
- dtype="int32")
+ scale_ref_frame_label = paddle.cast(
+ F.interpolate(
+ paddle.cast(
+ ref_frame_label, dtype="float32"),
+ size=(h, w),
+ mode='nearest'),
+ dtype="int32")
all_scale_ref_frame_label.append(scale_ref_frame_label)
scale_ref_frame_labels.append(all_scale_ref_frame_label)
- scale_previous_frame_label = paddle.cast(F.interpolate(
- paddle.cast(previous_frame_mask, dtype="float32"),
- size=(h, w),
- mode='nearest'),
- dtype="int32")
+ scale_previous_frame_label = paddle.cast(
+ F.interpolate(
+ paddle.cast(
+ previous_frame_mask, dtype="float32"),
+ size=(h, w),
+ mode='nearest'),
+ dtype="int32")
scale_previous_frame_labels.append(scale_previous_frame_label)
for n in range(bs):
ref_obj_ids = paddle.reshape(
- paddle.cast(paddle.arange(0,
- np.array(gt_ids)[n] + 1),
- dtype="int32"), [-1, 1, 1, 1])
+ paddle.cast(
+ paddle.arange(0, np.array(gt_ids)[n] + 1), dtype="int32"),
+ [-1, 1, 1, 1])
obj_num = ref_obj_ids.shape[0]
low_level_feat = paddle.unsqueeze(current_low_level[n], axis=0)
all_CE_input = []
@@ -143,19 +148,24 @@ def before_seghead_process(self,
seq_current_frame_embedding = current_frame_embedding[n]
seq_prev_frame_embedding = previous_frame_embedding[n]
seq_previous_frame_label = paddle.cast(
- (paddle.cast(scale_previous_frame_label[n], dtype="int32")
- == ref_obj_ids),
+ (paddle.cast(
+ scale_previous_frame_label[n], dtype="int32") ==
+ ref_obj_ids),
dtype="float32")
if np.array(gt_ids)[n] > 0:
- dis_bias = paddle.concat([
- paddle.unsqueeze(self.bg_bias[scale_idx], axis=0),
- paddle.expand(
- paddle.unsqueeze(self.fg_bias[scale_idx], axis=0),
- [np.array(gt_ids)[n], -1, -1, -1])
- ],
- axis=0)
+ dis_bias = paddle.concat(
+ [
+ paddle.unsqueeze(
+ self.bg_bias[scale_idx], axis=0),
+ paddle.expand(
+ paddle.unsqueeze(
+ self.fg_bias[scale_idx], axis=0),
+ [np.array(gt_ids)[n], -1, -1, -1])
+ ],
+ axis=0)
else:
- dis_bias = paddle.unsqueeze(self.bg_bias[scale_idx], axis=0)
+ dis_bias = paddle.unsqueeze(
+ self.bg_bias[scale_idx], axis=0)
#Global FG map
matching_dim = MODEL_SEMANTIC_MATCHING_DIM[scale_idx]
seq_current_frame_embedding_for_matching = paddle.transpose(
@@ -179,20 +189,20 @@ def before_seghead_process(self,
seq_ref_frame_embedding = paddle.transpose(
seq_ref_frame_embedding, [1, 2, 0])
seq_ref_frame_label = paddle.cast(
- (paddle.cast(scale_ref_frame_label[n],
- dtype="int32") == ref_obj_ids),
+ (paddle.cast(
+ scale_ref_frame_label[n], dtype="int32") ==
+ ref_obj_ids),
dtype="float32")
seq_ref_frame_labels.append(seq_ref_frame_label)
seq_ref_frame_label = paddle.transpose(
- paddle.squeeze(seq_ref_frame_label, axis=1),
- [1, 2, 0])
+ paddle.squeeze(
+ seq_ref_frame_label, axis=1), [1, 2, 0])
all_reference_embeddings.append(
seq_ref_frame_embedding[:, :, :matching_dim])
all_reference_labels.append(seq_ref_frame_label)
global_matching_fg = global_matching_for_eval(
all_reference_embeddings=all_reference_embeddings,
- query_embeddings=
- seq_current_frame_embedding_for_matching,
+ query_embeddings=seq_current_frame_embedding_for_matching,
all_reference_labels=all_reference_labels,
n_chunks=TEST_GLOBAL_MATCHING_CHUNK[scale_idx],
dis_bias=dis_bias,
@@ -204,7 +214,8 @@ def before_seghead_process(self,
seq_prev_frame_embedding_for_matching = paddle.transpose(
seq_prev_frame_embedding[:matching_dim], [1, 2, 0])
seq_previous_frame_label_for_matching = paddle.transpose(
- paddle.squeeze(seq_previous_frame_label, axis=1), [1, 2, 0])
+ paddle.squeeze(
+ seq_previous_frame_label, axis=1), [1, 2, 0])
local_matching_fg = local_matching(
prev_frame_embedding=seq_prev_frame_embedding_for_matching,
query_embedding=seq_current_frame_embedding_for_matching,
@@ -220,9 +231,11 @@ def before_seghead_process(self,
#Aggregate Pixel-level Matching
to_cat_global_matching_fg = paddle.transpose(
- paddle.squeeze(global_matching_fg, axis=0), [2, 3, 0, 1])
+ paddle.squeeze(
+ global_matching_fg, axis=0), [2, 3, 0, 1])
to_cat_local_matching_fg = paddle.transpose(
- paddle.squeeze(local_matching_fg, axis=0), [2, 3, 0, 1])
+ paddle.squeeze(
+ local_matching_fg, axis=0), [2, 3, 0, 1])
all_to_cat = [
to_cat_global_matching_fg, to_cat_local_matching_fg,
seq_previous_frame_label
@@ -231,27 +244,28 @@ def before_seghead_process(self,
#Global and Local BG map
if MODEL_MATCHING_BACKGROUND:
to_cat_global_matching_bg = foreground2background(
- to_cat_global_matching_fg,
- np.array(gt_ids)[n] + 1)
+ to_cat_global_matching_fg, np.array(gt_ids)[n] + 1)
reshaped_prev_nn_feature_n = paddle.unsqueeze(
paddle.transpose(to_cat_local_matching_fg,
[0, 2, 3, 1]),
axis=1)
to_cat_local_matching_bg = foreground2background(
- reshaped_prev_nn_feature_n,
- np.array(gt_ids)[n] + 1)
- to_cat_local_matching_bg = paddle.squeeze(paddle.transpose(
- to_cat_local_matching_bg, [0, 4, 2, 3, 1]),
- axis=-1)
+ reshaped_prev_nn_feature_n, np.array(gt_ids)[n] + 1)
+ to_cat_local_matching_bg = paddle.squeeze(
+ paddle.transpose(to_cat_local_matching_bg,
+ [0, 4, 2, 3, 1]),
+ axis=-1)
all_to_cat += [
to_cat_local_matching_bg, to_cat_global_matching_bg
]
to_cat_current_frame_embedding = paddle.expand(
- paddle.unsqueeze(current_frame_embedding[n], axis=0),
+ paddle.unsqueeze(
+ current_frame_embedding[n], axis=0),
[obj_num, -1, -1, -1])
to_cat_prev_frame_embedding = paddle.expand(
- paddle.unsqueeze(previous_frame_embedding[n], axis=0),
+ paddle.unsqueeze(
+ previous_frame_embedding[n], axis=0),
[obj_num, -1, -1, -1])
to_cat_prev_frame_embedding_fg = to_cat_prev_frame_embedding * seq_previous_frame_label
to_cat_prev_frame_embedding_bg = to_cat_prev_frame_embedding * (
@@ -271,8 +285,9 @@ def before_seghead_process(self,
all_ref_frame_embedding,
seq_ref_frame_labels,
paddle.expand(
- paddle.unsqueeze(previous_frame_embedding[n],
- axis=0), [obj_num, -1, -1, -1]),
+ paddle.unsqueeze(
+ previous_frame_embedding[n], axis=0),
+ [obj_num, -1, -1, -1]),
seq_previous_frame_label,
epsilon=self.epsilon)
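CFBI's `test_step` upsamples each per-object prediction map back to the original frame size before normalizing across objects. A self-contained sketch of that post-processing, with illustrative sizes:

```python
import paddle
import paddle.nn.functional as F

pred = paddle.randn([1, 3, 30, 54])     # [batch, obj_num, h, w] at feature scale
pred_size = (480, 864)                  # assumed original frame size
pred = F.interpolate(
    pred,
    size=[pred_size[0], pred_size[1]],
    mode='bilinear',
    align_corners=True)
prob = F.softmax(pred, axis=1)          # normalize over the object axis
print(prob.shape)                       # [1, 3, 480, 864]
```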
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/utils.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/utils.py
old mode 100644
new mode 100755
index 1ec3be4d2..75f3ed736
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/segment/utils.py
@@ -98,17 +98,16 @@ def _nn_features_per_object_for_chunk(reference_embeddings, ref_square,
query_embeddings_key = query_embeddings
dists = _flattened_pairwise_distances(reference_embeddings_key, ref_square,
query_embeddings_key, query_square)
- dists = (paddle.unsqueeze(dists, axis=1) +
- paddle.unsqueeze(wrong_label_mask, axis=0) *
- WRONG_LABEL_PADDING_DISTANCE)
+ dists = (paddle.unsqueeze(
+ dists, axis=1) + paddle.unsqueeze(
+ wrong_label_mask, axis=0) * WRONG_LABEL_PADDING_DISTANCE)
features = paddle.min(dists, axis=2, keepdim=True)
return features
-def _nearest_neighbor_features_per_object_in_chunks(reference_embeddings_flat,
- query_embeddings_flat,
- reference_labels_flat,
- n_chunks):
+def _nearest_neighbor_features_per_object_in_chunks(
+ reference_embeddings_flat, query_embeddings_flat,
+ reference_labels_flat, n_chunks):
"""Calculates the nearest neighbor features per object in chunks to save mem.
Uses chunking to bound the memory use.
Args:
@@ -144,8 +143,8 @@ def _nearest_neighbor_features_per_object_in_chunks(reference_embeddings_flat,
query_square_chunk = query_square[chunk_start:chunk_end]
if query_square_chunk.shape[0] == 0:
continue
- query_embeddings_flat_chunk = query_embeddings_flat[
- chunk_start:chunk_end]
+ query_embeddings_flat_chunk = query_embeddings_flat[chunk_start:
+ chunk_end]
features = _nn_features_per_object_for_chunk(
reference_embeddings_flat, ref_square, query_embeddings_flat_chunk,
query_square_chunk, wrong_label_mask)
@@ -192,8 +191,8 @@ def global_matching(reference_embeddings,
assert (reference_embeddings.shape[:2] == reference_labels.shape[:2])
if use_float16:
query_embeddings = paddle.cast(query_embeddings, dtype="float16")
- reference_embeddings = paddle.cast(reference_embeddings,
- dtype="float16")
+ reference_embeddings = paddle.cast(
+ reference_embeddings, dtype="float16")
h, w, embedding_dim = query_embeddings.shape
obj_nums = reference_labels.shape[2]
@@ -202,14 +201,13 @@ def global_matching(reference_embeddings,
w_pad = (atrous_rate - w % atrous_rate) % atrous_rate
selected_points = paddle.zeros([h + h_pad, w + w_pad])
selected_points = selected_points.view(
- (h + h_pad) // atrous_rate, atrous_rate, (w + w_pad) // atrous_rate,
- atrous_rate)
+ (h + h_pad) // atrous_rate, atrous_rate,
+ (w + w_pad) // atrous_rate, atrous_rate)
selected_points[:, 0, :, 0] = 1.
selected_points = paddle.reshape(selected_points,
[h + h_pad, w + w_pad, 1])[:h, :w]
- is_big_obj = (paddle.sum(
- reference_labels,
- axis=(0, 1))) > (atrous_obj_pixel_num * atrous_rate**2)
+ is_big_obj = (paddle.sum(reference_labels, axis=(0, 1))) > (
+ atrous_obj_pixel_num * atrous_rate**2)
reference_labels[:, :,
is_big_obj] = reference_labels[:, :,
is_big_obj] * selected_points
@@ -233,13 +231,12 @@ def global_matching(reference_embeddings,
[-1, embedding_dim])
nn_features = _nearest_neighbor_features_per_object_in_chunks(
- reference_embeddings_flat, query_embeddings_flat, reference_labels_flat,
- n_chunks)
+ reference_embeddings_flat, query_embeddings_flat,
+ reference_labels_flat, n_chunks)
nn_features_reshape = paddle.reshape(nn_features, [1, h, w, obj_nums, 1])
- nn_features_reshape = (
- F.sigmoid(nn_features_reshape +
- paddle.reshape(dis_bias, [1, 1, 1, -1, 1])) - 0.5) * 2
+ nn_features_reshape = (F.sigmoid(nn_features_reshape + paddle.reshape(
+ dis_bias, [1, 1, 1, -1, 1])) - 0.5) * 2
#TODO: ori_size is not None
@@ -300,11 +297,11 @@ def global_matching_for_eval(all_reference_embeddings,
[h + h_pad, w + w_pad, 1])[:h, :w]
for reference_embeddings, reference_labels, idx in zip(
- all_reference_embeddings, all_reference_labels, range(ref_num)):
+ all_reference_embeddings, all_reference_labels,
+ range(ref_num)):
if atrous_rate > 1:
- is_big_obj = paddle.sum(
- reference_labels,
- axis=(0, 1)) > (atrous_obj_pixel_num * atrous_rate**2)
+ is_big_obj = paddle.sum(reference_labels, axis=(0, 1)) > (
+ atrous_obj_pixel_num * atrous_rate**2)
is_big_obj = list(np.array(is_big_obj))
for j in range(len(is_big_obj)):
if is_big_obj[j]:
@@ -322,8 +319,8 @@ def global_matching_for_eval(all_reference_embeddings,
reference_embeddings_flat = paddle.concat(
x=all_reference_embeddings_flat, axis=0)
- reference_labels_flat = paddle.concat(x=all_reference_labels_flat,
- axis=0)
+ reference_labels_flat = paddle.concat(
+ x=all_reference_labels_flat, axis=0)
else:
if ref_num == 1:
reference_embeddings, reference_labels = all_reference_embeddings[
@@ -341,9 +338,9 @@ def global_matching_for_eval(all_reference_embeddings,
[(h + h_pad) // atrous_rate, atrous_rate,
(w + w_pad) // atrous_rate, atrous_rate, 32])
reference_labels = paddle.reshape(
- reference_labels,
- [(h + h_pad) // atrous_rate, atrous_rate,
- (w + w_pad) // atrous_rate, atrous_rate, -1])
+ reference_labels, [(h + h_pad) // atrous_rate, atrous_rate,
+ (w + w_pad) // atrous_rate, atrous_rate,
+ -1])
reference_embeddings = paddle.reshape(
reference_embeddings[:, 0, :, 0, :],
reference_embeddings[:, 0, :, 0, :].shape)
@@ -362,8 +359,8 @@ def global_matching_for_eval(all_reference_embeddings,
h_pad = (atrous_rate - h % atrous_rate) % atrous_rate
w_pad = (atrous_rate - w % atrous_rate) % atrous_rate
if h_pad > 0 or w_pad > 0:
- reference_embeddings = F.pad(reference_embeddings,
- [0, h_pad, 0, w_pad, 0, 0])
+ reference_embeddings = F.pad(
+ reference_embeddings, [0, h_pad, 0, w_pad, 0, 0])
reference_labels = F.pad(reference_labels,
[0, h_pad, 0, w_pad, 0, 0])
@@ -392,8 +389,8 @@ def global_matching_for_eval(all_reference_embeddings,
reference_embeddings_flat = paddle.concat(
all_reference_embeddings_flat, axis=0)
- reference_labels_flat = paddle.concat(all_reference_labels_flat,
- axis=0)
+ reference_labels_flat = paddle.concat(
+ all_reference_labels_flat, axis=0)
query_embeddings_flat = paddle.reshape(query_embeddings,
[-1, embedding_dim])
@@ -410,18 +407,17 @@ def global_matching_for_eval(all_reference_embeddings,
paddle.expand(all_ref_fg, [-1, embedding_dim])),
[-1, embedding_dim])
if use_float16:
- query_embeddings_flat = paddle.cast(query_embeddings_flat,
- dtype="float16")
- reference_embeddings_flat = paddle.cast(reference_embeddings_flat,
- dtype="float16")
+ query_embeddings_flat = paddle.cast(
+ query_embeddings_flat, dtype="float16")
+ reference_embeddings_flat = paddle.cast(
+ reference_embeddings_flat, dtype="float16")
nn_features = _nearest_neighbor_features_per_object_in_chunks(
- reference_embeddings_flat, query_embeddings_flat, reference_labels_flat,
- n_chunks)
+ reference_embeddings_flat, query_embeddings_flat,
+ reference_labels_flat, n_chunks)
nn_features_reshape = paddle.reshape(nn_features, [1, h, w, obj_nums, 1])
- nn_features_reshape = (
- F.sigmoid(nn_features_reshape +
- paddle.reshape(dis_bias, [1, 1, 1, -1, 1])) - 0.5) * 2
+ nn_features_reshape = (F.sigmoid(nn_features_reshape + paddle.reshape(
+ dis_bias, [1, 1, 1, -1, 1])) - 0.5) * 2
# TODO: ori_size is not None
@@ -455,14 +451,10 @@ def local_pairwise_distances(x,
x = paddle.unsqueeze(paddle.transpose(x, [2, 0, 1]), axis=0)
y = paddle.unsqueeze(paddle.transpose(y, [2, 0, 1]), axis=0)
down_size = (int(ori_height / 2) + 1, int(ori_width / 2) + 1)
- x = F.interpolate(x,
- size=down_size,
- mode='bilinear',
- align_corners=True)
- y = F.interpolate(y,
- size=down_size,
- mode='bilinear',
- align_corners=True)
+ x = F.interpolate(
+ x, size=down_size, mode='bilinear', align_corners=True)
+ y = F.interpolate(
+ y, size=down_size, mode='bilinear', align_corners=True)
x = paddle.unsqueeze(paddle.transpose(x, [1, 2, 0]), axis=0)
y = paddle.unsqueeze(paddle.transpose(y, [1, 2, 0]), axis=0)
@@ -512,45 +504,43 @@ def local_pairwise_distances_parallel(x,
y = paddle.unsqueeze(paddle.transpose(y, [2, 0, 1]), axis=0)
if allow_downsample:
down_size = (int(ori_height / 2) + 1, int(ori_width / 2) + 1)
- x = F.interpolate(x,
- size=down_size,
- mode='bilinear',
- align_corners=True)
- y = F.interpolate(y,
- size=down_size,
- mode='bilinear',
- align_corners=True)
+ x = F.interpolate(
+ x, size=down_size, mode='bilinear', align_corners=True)
+ y = F.interpolate(
+ y, size=down_size, mode='bilinear', align_corners=True)
_, channels, height, width = x.shape
- x2 = paddle.reshape(paddle.sum(paddle.pow(x, 2), axis=1),
- [height, width, 1])
- y2 = paddle.reshape(paddle.sum(paddle.pow(y, 2), axis=1),
- [1, 1, height, width])
+ x2 = paddle.reshape(
+ paddle.sum(paddle.pow(x, 2), axis=1), [height, width, 1])
+ y2 = paddle.reshape(
+ paddle.sum(paddle.pow(y, 2), axis=1), [1, 1, height, width])
pad_max_distance = max_distance - max_distance % atrous_rate
# no change pad
padded_y = F.pad(y, (pad_max_distance, pad_max_distance, pad_max_distance,
pad_max_distance))
- padded_y2 = F.pad(y2, (pad_max_distance, pad_max_distance, pad_max_distance,
- pad_max_distance),
+ padded_y2 = F.pad(y2, (pad_max_distance, pad_max_distance,
+ pad_max_distance, pad_max_distance),
value=WRONG_LABEL_PADDING_DISTANCE)
offset_y = paddle.transpose(
paddle.reshape(
- F.unfold(x=padded_y,
- kernel_sizes=[height, width],
- strides=[atrous_rate, atrous_rate]),
+ F.unfold(
+ x=padded_y,
+ kernel_sizes=[height, width],
+ strides=[atrous_rate, atrous_rate]),
[channels, height * width, -1]), [1, 0, 2])
offset_y2 = paddle.reshape(
- F.unfold(padded_y2,
- kernel_sizes=[height, width],
- strides=[atrous_rate, atrous_rate]), [height, width, -1])
- x = paddle.transpose(paddle.reshape(x, [channels, height * width, -1]),
- [1, 2, 0])
+ F.unfold(
+ padded_y2,
+ kernel_sizes=[height, width],
+ strides=[atrous_rate, atrous_rate]), [height, width, -1])
+ x = paddle.transpose(
+ paddle.reshape(x, [channels, height * width, -1]), [1, 2, 0])
- dists = x2 + offset_y2 - 2. * paddle.reshape(paddle.matmul(x, offset_y),
- [height, width, -1])
+ dists = x2 + offset_y2 - 2. * paddle.reshape(
+ paddle.matmul(x, offset_y), [height, width, -1])
return dists
@@ -597,28 +587,31 @@ def local_matching(prev_frame_embedding,
pad = paddle.ones([1]) * WRONG_LABEL_PADDING_DISTANCE
if use_float16:
query_embedding = paddle.cast(query_embedding, dtype="float16")
- prev_frame_embedding = paddle.cast(prev_frame_embedding,
- dtype="float16")
+ prev_frame_embedding = paddle.cast(
+ prev_frame_embedding, dtype="float16")
pad = paddle.cast(pad, dtype="float16")
if allow_parallel:
- d = local_pairwise_distances_parallel(query_embedding,
- prev_frame_embedding,
- max_distance=max_distance,
- atrous_rate=atrous_rate,
- allow_downsample=allow_downsample)
+ d = local_pairwise_distances_parallel(
+ query_embedding,
+ prev_frame_embedding,
+ max_distance=max_distance,
+ atrous_rate=atrous_rate,
+ allow_downsample=allow_downsample)
else:
- d = local_pairwise_distances(query_embedding,
- prev_frame_embedding,
- max_distance=max_distance,
- atrous_rate=atrous_rate,
- allow_downsample=allow_downsample)
+ d = local_pairwise_distances(
+ query_embedding,
+ prev_frame_embedding,
+ max_distance=max_distance,
+ atrous_rate=atrous_rate,
+ allow_downsample=allow_downsample)
height, width = d.shape[:2]
- labels = paddle.unsqueeze(paddle.transpose(prev_frame_labels, [2, 0, 1]), 1)
- labels = paddle.unsqueeze(paddle.transpose(prev_frame_labels, [2, 0, 1]),
- axis=1)
+ labels = paddle.unsqueeze(
+ paddle.transpose(prev_frame_labels, [2, 0, 1]), 1)
+ labels = paddle.unsqueeze(
+ paddle.transpose(prev_frame_labels, [2, 0, 1]), axis=1)
if (height, width) != ori_size:
labels = F.interpolate(labels, size=(height, width), mode='nearest')
@@ -629,30 +622,32 @@ def local_matching(prev_frame_embedding,
pad_max_distance,
pad_max_distance,
pad_max_distance,
- pad_max_distance,
- ),
+ pad_max_distance, ),
mode='constant',
value=0)
offset_masks = paddle.transpose(
paddle.reshape(
- F.unfold(padded_labels,
- kernel_sizes=[height, width],
- strides=[atrous_rate, atrous_rate]),
+ F.unfold(
+ padded_labels,
+ kernel_sizes=[height, width],
+ strides=[atrous_rate, atrous_rate]),
[obj_num, height, width, -1]), [1, 2, 3, 0]) > 0.9
- d_tiled = paddle.expand(paddle.unsqueeze(
- d, axis=-1), [-1, -1, -1, obj_num]) # h, w, num_local_pos, obj_num
+ d_tiled = paddle.expand(
+ paddle.unsqueeze(
+ d, axis=-1), [-1, -1, -1, obj_num]) # h, w, num_local_pos, obj_num
d_masked = paddle.where(offset_masks, d_tiled, pad)
dists = paddle.min(d_masked, axis=2)
multi_dists = [
- paddle.unsqueeze(paddle.transpose(dists, [2, 0, 1]), axis=1)
+ paddle.unsqueeze(
+ paddle.transpose(dists, [2, 0, 1]), axis=1)
] # n_objects, num_multi_local, h, w
reshaped_d_masked = paddle.reshape(d_masked, [
- height, width, 2 * atrous_max_distance + 1, 2 * atrous_max_distance + 1,
- obj_num
+ height, width, 2 * atrous_max_distance + 1,
+ 2 * atrous_max_distance + 1, obj_num
])
for local_dis in multi_local_distance[:-1]:
local_dis = local_dis // atrous_rate
@@ -660,27 +655,26 @@ def local_matching(prev_frame_embedding,
end_idx = atrous_max_distance + local_dis + 1
new_d_masked = paddle.reshape(
reshaped_d_masked[:, :, start_idx:end_idx, start_idx:end_idx, :],
- reshaped_d_masked[:, :, start_idx:end_idx,
- start_idx:end_idx, :].shape)
+ reshaped_d_masked[:, :, start_idx:end_idx, start_idx:
+ end_idx, :].shape)
new_d_masked = paddle.reshape(new_d_masked,
[height, width, -1, obj_num])
new_dists = paddle.min(new_d_masked, axis=2)
- new_dists = paddle.unsqueeze(paddle.transpose(new_dists, [2, 0, 1]),
- axis=1)
+ new_dists = paddle.unsqueeze(
+ paddle.transpose(new_dists, [2, 0, 1]), axis=1)
multi_dists.append(new_dists)
multi_dists = paddle.concat(multi_dists, axis=1)
- multi_dists = (F.sigmoid(multi_dists +
- paddle.reshape(dis_bias, [-1, 1, 1, 1])) - 0.5) * 2
+ multi_dists = (
+ F.sigmoid(multi_dists + paddle.reshape(dis_bias, [-1, 1, 1, 1])) - 0.5
+ ) * 2
if use_float16:
multi_dists = paddle.cast(multi_dists, dtype="float32")
if (height, width) != ori_size:
- multi_dists = F.interpolate(multi_dists,
- size=ori_size,
- mode='bilinear',
- align_corners=True)
+ multi_dists = F.interpolate(
+ multi_dists, size=ori_size, mode='bilinear', align_corners=True)
multi_dists = paddle.transpose(multi_dists, perm=[2, 3, 0, 1])
multi_dists = paddle.reshape(multi_dists,
[1, ori_size[0], ori_size[1], obj_num, -1])
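The matching helpers above all bound peak memory the same way: the flattened query pixels are processed in chunks, and only the per-chunk nearest-neighbor distance is kept. A minimal sketch of that chunking pattern with plain squared-Euclidean distances (sizes are illustrative):

```python
import paddle

queries = paddle.randn([10000, 32])    # flattened query embeddings
refs = paddle.randn([2048, 32])        # flattened reference embeddings
n_chunks = 4
chunk = (queries.shape[0] + n_chunks - 1) // n_chunks

mins = []
for start in range(0, queries.shape[0], chunk):
    q = queries[start:start + chunk]
    # squared distances for this chunk only: |q|^2 - 2 q.r + |r|^2
    d = (paddle.sum(q * q, axis=1, keepdim=True)
         - 2 * paddle.matmul(q, refs, transpose_y=True)
         + paddle.sum(refs * refs, axis=1))
    mins.append(paddle.min(d, axis=1, keepdim=True))
nn_dist = paddle.concat(mins, axis=0)  # nearest-reference distance per query
print(nn_dist.shape)                   # [10000, 1]
```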
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/adds_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/adds_head.py
old mode 100644
new mode 100755
index 3b1cd2462..9e11d4867
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/adds_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/adds_head.py
@@ -36,6 +36,7 @@ class AddsHead(nn.Layer):
kwargs (dict, optional): Any keyword argument to initialize.
"""
+
def __init__(self,
avg_reprojection,
disparity_smoothness,
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/attention_lstm_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/attention_lstm_head.py
old mode 100644
new mode 100755
index f3415a307..ba11cbf2c
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/attention_lstm_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/attention_lstm_head.py
@@ -28,6 +28,7 @@ class AttentionLstmHead(BaseHead):
"""AttentionLstmHead.
Args: TODO
"""
+
def __init__(self,
num_classes=3862,
feature_num=2,
@@ -44,39 +45,41 @@ def __init__(self,
self.lstm_size = lstm_size
self.feature_num = len(self.feature_dims)
for i in range(self.feature_num): # 0:rgb, 1:audio
- fc_feature = paddle.nn.Linear(in_features=self.feature_dims[i],
- out_features=self.embedding_size)
+ fc_feature = paddle.nn.Linear(
+ in_features=self.feature_dims[i],
+ out_features=self.embedding_size)
self.add_sublayer("fc_feature{}".format(i), fc_feature)
- bi_lstm = paddle.nn.LSTM(input_size=self.embedding_size,
- hidden_size=self.lstm_size,
- direction="bidirectional")
+ bi_lstm = paddle.nn.LSTM(
+ input_size=self.embedding_size,
+ hidden_size=self.lstm_size,
+ direction="bidirectional")
self.add_sublayer("bi_lstm{}".format(i), bi_lstm)
drop_rate = 0.5
self.dropout = paddle.nn.Dropout(drop_rate)
- att_fc = paddle.nn.Linear(in_features=self.lstm_size * 2,
- out_features=1)
+ att_fc = paddle.nn.Linear(
+ in_features=self.lstm_size * 2, out_features=1)
self.add_sublayer("att_fc{}".format(i), att_fc)
self.softmax = paddle.nn.Softmax()
- self.fc_out1 = paddle.nn.Linear(in_features=self.lstm_size * 4,
- out_features=8192,
- bias_attr=ParamAttr(
- regularizer=L2Decay(0.0),
- initializer=Normal()))
+ self.fc_out1 = paddle.nn.Linear(
+ in_features=self.lstm_size * 4,
+ out_features=8192,
+ bias_attr=ParamAttr(
+ regularizer=L2Decay(0.0), initializer=Normal()))
self.relu = paddle.nn.ReLU()
- self.fc_out2 = paddle.nn.Linear(in_features=8192,
- out_features=4096,
- bias_attr=ParamAttr(
- regularizer=L2Decay(0.0),
- initializer=Normal()))
- self.fc_logit = paddle.nn.Linear(in_features=4096,
- out_features=self.num_classes,
- bias_attr=ParamAttr(
- regularizer=L2Decay(0.0),
- initializer=Normal()))
+ self.fc_out2 = paddle.nn.Linear(
+ in_features=8192,
+ out_features=4096,
+ bias_attr=ParamAttr(
+ regularizer=L2Decay(0.0), initializer=Normal()))
+ self.fc_logit = paddle.nn.Linear(
+ in_features=4096,
+ out_features=self.num_classes,
+ bias_attr=ParamAttr(
+ regularizer=L2Decay(0.0), initializer=Normal()))
self.sigmoid = paddle.nn.Sigmoid()
def init_weights(self):
@@ -88,9 +91,9 @@ def forward(self, inputs):
        # 1. pad the features to the same length and build a tensor
        # 2. build a mask tensor of the same shape, filled with 1
        # 3. compute the output using the mask tensor, so that the output is unaffected by the padding
- assert (len(inputs) == self.feature_num
- ), "Input tensor does not contain {} features".format(
- self.feature_num)
+ assert (
+ len(inputs) == self.feature_num
+ ), "Input tensor does not contain {} features".format(self.feature_num)
att_outs = []
for i in range(len(inputs)):
# 1. fc
@@ -150,7 +153,7 @@ def metric(self, lstm_output, labels):
pred = lstm_output.numpy()
label = labels.numpy()
hit_at_one = youtube8m_metrics.calculate_hit_at_one(pred, label)
- perr = youtube8m_metrics.calculate_precision_at_equal_recall_rate(
- pred, label)
+ perr = youtube8m_metrics.calculate_precision_at_equal_recall_rate(pred,
+ label)
gap = youtube8m_metrics.calculate_gap(pred, label)
return hit_at_one, perr, gap
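For each feature stream, the head above runs a bidirectional LSTM and then pools its outputs with a learned, softmax-normalized attention weight per timestep. A minimal sketch of that pooling — the dimensions follow the defaults above, and the pooling step itself is inferred from the layers being built, since `forward` is only partially shown in this hunk:

```python
import paddle

embedding_size, lstm_size, T = 512, 1024, 30
x = paddle.randn([1, T, embedding_size])

bi_lstm = paddle.nn.LSTM(
    input_size=embedding_size,
    hidden_size=lstm_size,
    direction="bidirectional")
lstm_out, _ = bi_lstm(x)                            # [1, T, 2*lstm_size]

att_fc = paddle.nn.Linear(in_features=lstm_size * 2, out_features=1)
att = paddle.nn.Softmax(axis=1)(att_fc(lstm_out))   # one weight per timestep
pooled = paddle.sum(att * lstm_out, axis=1)         # [1, 2*lstm_size]
print(pooled.shape)
```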
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/base.py
old mode 100644
new mode 100755
index 2eba23e90..d5a69f98c
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/base.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/base.py
@@ -41,15 +41,16 @@ class BaseHead(nn.Layer):
ls_eps (float): label smoothing epsilon. Default: 0. .
"""
+
def __init__(
- self,
- num_classes,
- in_channels,
- loss_cfg=dict(
- name="CrossEntropyLoss"
- ), #TODO(shipping): only pass a name or standard build cfg format.
- #multi_class=False, NOTE(shipping): not supported now.
- ls_eps=0.):
+ self,
+ num_classes,
+ in_channels,
+ loss_cfg=dict(
+ name="CrossEntropyLoss"
+ ), #TODO(shipping): only pass a name or standard build cfg format.
+ #multi_class=False, NOTE(shipping): not supported now.
+ ls_eps=0.):
super().__init__()
self.num_classes = num_classes
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/bbox_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/bbox_head.py
old mode 100644
new mode 100755
index 688251ebb..8f0398d41
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/bbox_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/bbox_head.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import paddle
+import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
@@ -20,6 +20,7 @@
from ..registry import HEADS
+
@HEADS.register()
class BBoxHeadAVA(nn.Layer):
"""Simplest RoI head, with only two fc layers for classification and
@@ -30,7 +31,7 @@ def __init__(
temporal_pool_type='avg',
spatial_pool_type='max',
in_channels=2048,
- num_classes=81,# The first class is reserved, to classify bbox as pos / neg
+ num_classes=81, # The first class is reserved, to classify bbox as pos / neg
dropout_ratio=0,
dropout_before_pool=True,
topk=(3, 5),
@@ -77,16 +78,22 @@ def __init__(
if dropout_ratio > 0:
self.dropout = nn.Dropout(dropout_ratio)
- weight_attr = paddle.framework.ParamAttr(name="weight",
- initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))
- bias_attr = paddle.ParamAttr(name="bias",
- initializer=paddle.nn.initializer.Constant(value=0.0))
+ weight_attr = paddle.framework.ParamAttr(
+ name="weight",
+ initializer=paddle.nn.initializer.Normal(
+ mean=0.0, std=0.01))
+ bias_attr = paddle.ParamAttr(
+ name="bias", initializer=paddle.nn.initializer.Constant(value=0.0))
- self.fc_cls = nn.Linear(in_channels, num_classes, weight_attr=weight_attr, bias_attr=bias_attr)
+ self.fc_cls = nn.Linear(
+ in_channels,
+ num_classes,
+ weight_attr=weight_attr,
+ bias_attr=bias_attr)
self.debug_imgs = None
- def forward(self, x,rois, rois_num):
+ def forward(self, x, rois, rois_num):
roi = paddle.concat(rois)
roi_x1 = paddle.index_select(roi, index=paddle.to_tensor(0), axis=1)
roi_x2 = paddle.index_select(roi, index=paddle.to_tensor(2), axis=1)
@@ -100,14 +107,14 @@ def forward(self, x,rois, rois_num):
A2 = paddle.where(A == 0, paddle.zeros_like(A1), A1)
AE = paddle.expand(A2, [A.shape[0], x.shape[1]])
rois_num = paddle.to_tensor(rois_num, dtype='int32')
- if self.dropout_before_pool and self.dropout_ratio > 0 :
+ if self.dropout_before_pool and self.dropout_ratio > 0:
x = self.dropout(x)
x = self.temporal_pool(x)
x = self.spatial_pool(x)
- if not self.dropout_before_pool and self.dropout_ratio > 0 :
+ if not self.dropout_before_pool and self.dropout_ratio > 0:
x = self.dropout(x)
x = paddle.reshape(x, [x.shape[0], -1])
- x = paddle.multiply(x, paddle.cast(AE,"float32"))
+ x = paddle.multiply(x, paddle.cast(AE, "float32"))
cls_score = self.fc_cls(x)
# We do not predict bbox, so return None
return cls_score, None
@@ -117,17 +124,18 @@ def get_targets(self, sampling_results, gt_bboxes, gt_labels, pos_weight):
neg_proposals = [res.neg_bboxes for res in sampling_results]
pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
cls_reg_targets = self.bbox_target(pos_proposals, neg_proposals,
- pos_gt_labels, pos_weight)
+ pos_gt_labels, pos_weight)
return cls_reg_targets
- def bbox_target(self, pos_bboxes_list, neg_bboxes_list, gt_labels, pos_weight):
+ def bbox_target(self, pos_bboxes_list, neg_bboxes_list, gt_labels,
+ pos_weight):
"""Generate classification targets for bboxes. """
labels, label_weights = [], []
pos_weight = 1.0 if pos_weight <= 0 else pos_weight
-
+
assert len(pos_bboxes_list) == len(neg_bboxes_list) == len(gt_labels)
length = len(pos_bboxes_list)
-
+
for i in range(length):
pos_bboxes = pos_bboxes_list[i]
neg_bboxes = neg_bboxes_list[i]
@@ -139,27 +147,38 @@ def bbox_target(self, pos_bboxes_list, neg_bboxes_list, gt_labels, pos_weight):
num_neg = 0
num_samples = num_pos + num_neg
neg_label = paddle.zeros([num_neg, gt_label.shape[1]])
- label = paddle.concat([gt_label,neg_label])
+ label = paddle.concat([gt_label, neg_label])
labels.append(label)
-
+
labels = paddle.concat(labels, 0)
return labels
def recall_prec(self, pred_vec, target_vec):
- correct = paddle.to_tensor(np.logical_and(pred_vec.numpy(), target_vec.numpy()))
- correct = paddle.where(correct,
- paddle.full(correct.shape,1,dtype='int32'),
- paddle.full(correct.shape,0,dtype='int32'))
+ correct = paddle.to_tensor(
+ np.logical_and(pred_vec.numpy(), target_vec.numpy()))
+ correct = paddle.where(
+ correct,
+ paddle.full(
+ correct.shape, 1, dtype='int32'),
+ paddle.full(
+ correct.shape, 0, dtype='int32'))
recall_correct = paddle.cast(paddle.sum(correct, axis=1), 'float32')
- target_vec = paddle.where(target_vec,
- paddle.full(target_vec.shape,1,dtype='int32'),
- paddle.full(target_vec.shape,0,dtype='int32'))
- recall_target = paddle.cast(paddle.sum(target_vec, axis=1),'float32')
+ target_vec = paddle.where(
+ target_vec,
+ paddle.full(
+ target_vec.shape, 1, dtype='int32'),
+ paddle.full(
+ target_vec.shape, 0, dtype='int32'))
+ recall_target = paddle.cast(paddle.sum(target_vec, axis=1), 'float32')
recall = recall_correct / recall_target
- pred_vec = paddle.where(pred_vec,
- paddle.full(pred_vec.shape,1,dtype='int32'),
- paddle.full(pred_vec.shape,0,dtype='int32'))
- prec_target = paddle.cast(paddle.sum(pred_vec, axis=1) + 1e-6, 'float32')
+ pred_vec = paddle.where(
+ pred_vec,
+ paddle.full(
+ pred_vec.shape, 1, dtype='int32'),
+ paddle.full(
+ pred_vec.shape, 0, dtype='int32'))
+ prec_target = paddle.cast(
+ paddle.sum(pred_vec, axis=1) + 1e-6, 'float32')
prec = recall_correct / prec_target
recall_mean = paddle.mean(recall)
prec_mean = paddle.mean(prec)
@@ -173,29 +192,30 @@ def multilabel_accuracy(self, pred, target, thr=0.5):
recalls, precs = [], []
for k in self.topk:
_, pred_label = paddle.topk(pred, k, 1, True, True)
- pred_vec = paddle.full(pred.shape,0,dtype='bool')
+ pred_vec = paddle.full(pred.shape, 0, dtype='bool')
num_sample = pred.shape[0]
for i in range(num_sample):
- pred_vec[i, pred_label[i].numpy()] = 1
+ pred_vec[i, pred_label[i].numpy()] = 1
recall_k, prec_k = self.recall_prec(pred_vec, target_vec)
recalls.append(recall_k)
precs.append(prec_k)
return recall_thr, prec_thr, recalls, precs
- def loss(self,
- cls_score,
- labels):
+ def loss(self, cls_score, labels):
losses = dict()
if cls_score is not None:
# Only use the cls_score
labels = labels[:, 1:]
pos_inds_bool = paddle.sum(labels, axis=-1) > 0
- pos_inds = paddle.where(paddle.sum(labels, axis=-1) > 0,
- paddle.full([labels.shape[0]],1,dtype='int32'),
- paddle.full([labels.shape[0]],0,dtype='int32'))
+ pos_inds = paddle.where(
+ paddle.sum(labels, axis=-1) > 0,
+ paddle.full(
+ [labels.shape[0]], 1, dtype='int32'),
+ paddle.full(
+ [labels.shape[0]], 0, dtype='int32'))
pos_inds = paddle.nonzero(pos_inds, as_tuple=False)
cls_score = paddle.index_select(cls_score, pos_inds, axis=0)
- cls_score = cls_score[:, 1:]
+ cls_score = cls_score[:, 1:]
labels = paddle.index_select(labels, pos_inds, axis=0)
bce_loss = F.binary_cross_entropy_with_logits
loss = bce_loss(cls_score, labels, reduction='none')
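`recall_prec` and `multilabel_accuracy` reduce to a small amount of boolean algebra once the `paddle.where` gymnastics are stripped away: mark the top-k classes, intersect with the targets, and normalize. A sketch with toy scores; the `1e-6` guard mirrors the code above:

```python
import numpy as np
import paddle

pred = paddle.rand([2, 5])                        # per-class scores
target_vec = paddle.to_tensor(
    [[1, 0, 1, 0, 0],
     [0, 1, 0, 0, 1]]).astype('bool')

k = 3
_, pred_label = paddle.topk(pred, k, 1, True, True)
pred_vec = paddle.full(pred.shape, 0, dtype='bool')
for i in range(pred.shape[0]):
    pred_vec[i, pred_label[i].numpy()] = 1        # mark the top-k classes

correct = paddle.cast(
    paddle.to_tensor(np.logical_and(pred_vec.numpy(), target_vec.numpy())),
    'float32')
recall = paddle.sum(correct, axis=1) / paddle.sum(
    paddle.cast(target_vec, 'float32'), axis=1)
prec = paddle.sum(correct, axis=1) / (paddle.sum(
    paddle.cast(pred_vec, 'float32'), axis=1) + 1e-6)
print(float(paddle.mean(recall)), float(paddle.mean(prec)))
```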
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/cfbi_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/cfbi_head.py
old mode 100644
new mode 100755
index f7cbd910e..729a4f618
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/cfbi_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/cfbi_head.py
@@ -62,12 +62,12 @@ def __init__(self, num_channels, epsilon=1e-5, mode='l2', after_relu=False):
def forward(self, x):
if self.mode == 'l2':
- embedding = paddle.pow(
- paddle.sum(paddle.pow(x, 2), axis=[2, 3], keepdim=True) +
- self.epsilon, 0.5) * self.alpha
- norm = self.gamma / paddle.pow(
- (paddle.mean(paddle.pow(embedding, 2), axis=1, keepdim=True) +
- self.epsilon), 0.5)
+ embedding = paddle.pow(paddle.sum(
+ paddle.pow(x, 2), axis=[2, 3], keepdim=True) + self.epsilon,
+ 0.5) * self.alpha
+ norm = self.gamma / paddle.pow((paddle.mean(
+ paddle.pow(embedding, 2), axis=1, keepdim=True) + self.epsilon),
+ 0.5)
elif self.mode == 'l1':
if not self.after_relu:
_x = paddle.abs(x)
@@ -95,30 +95,30 @@ def __init__(self, inplanes, outplanes, stride=1, dilation=1):
self.conv1 = nn.Conv2D(inplanes, planes, kernel_size=1, bias_attr=False)
self.bn1 = nn.GroupNorm(num_groups=32, num_channels=planes)
- self.conv2 = nn.Conv2D(planes,
- planes,
- kernel_size=3,
- stride=stride,
- dilation=dilation,
- padding=dilation,
- bias_attr=False)
+ self.conv2 = nn.Conv2D(
+ planes,
+ planes,
+ kernel_size=3,
+ stride=stride,
+ dilation=dilation,
+ padding=dilation,
+ bias_attr=False)
self.bn2 = nn.GroupNorm(num_groups=32, num_channels=planes)
- self.conv3 = nn.Conv2D(planes,
- planes * expansion,
- kernel_size=1,
- bias_attr=False)
+ self.conv3 = nn.Conv2D(
+ planes, planes * expansion, kernel_size=1, bias_attr=False)
self.bn3 = nn.GroupNorm(num_groups=32, num_channels=planes * expansion)
self.relu = nn.ReLU()
if stride != 1 or inplanes != planes * expansion:
downsample = nn.Sequential(
- nn.Conv2D(inplanes,
- planes * expansion,
- kernel_size=1,
- stride=stride,
- bias_attr=False),
- nn.GroupNorm(num_groups=32, num_channels=planes * expansion),
- )
+ nn.Conv2D(
+ inplanes,
+ planes * expansion,
+ kernel_size=1,
+ stride=stride,
+ bias_attr=False),
+ nn.GroupNorm(
+ num_groups=32, num_channels=planes * expansion), )
else:
downsample = None
self.downsample = downsample
@@ -157,13 +157,14 @@ class _ASPPModule(nn.Layer):
def __init__(self, inplanes, planes, kernel_size, padding, dilation):
super(_ASPPModule, self).__init__()
self.GCT = GCT(inplanes)
- self.atrous_conv = nn.Conv2D(inplanes,
- planes,
- kernel_size=kernel_size,
- stride=1,
- padding=padding,
- dilation=dilation,
- bias_attr=False)
+ self.atrous_conv = nn.Conv2D(
+ inplanes,
+ planes,
+ kernel_size=kernel_size,
+ stride=1,
+ padding=padding,
+ dilation=dilation,
+ bias_attr=False)
self.bn = nn.GroupNorm(num_groups=int(planes / 4), num_channels=planes)
self.relu = nn.ReLU()
@@ -192,30 +193,20 @@ def __init__(self):
inplanes = 512
dilations = [1, 6, 12, 18]
- self.aspp1 = _ASPPModule(inplanes,
- 128,
- 1,
- padding=0,
- dilation=dilations[0])
- self.aspp2 = _ASPPModule(inplanes,
- 128,
- 3,
- padding=dilations[1],
- dilation=dilations[1])
- self.aspp3 = _ASPPModule(inplanes,
- 128,
- 3,
- padding=dilations[2],
- dilation=dilations[2])
- self.aspp4 = _ASPPModule(inplanes,
- 128,
- 3,
- padding=dilations[3],
- dilation=dilations[3])
+ self.aspp1 = _ASPPModule(
+ inplanes, 128, 1, padding=0, dilation=dilations[0])
+ self.aspp2 = _ASPPModule(
+ inplanes, 128, 3, padding=dilations[1], dilation=dilations[1])
+ self.aspp3 = _ASPPModule(
+ inplanes, 128, 3, padding=dilations[2], dilation=dilations[2])
+ self.aspp4 = _ASPPModule(
+ inplanes, 128, 3, padding=dilations[3], dilation=dilations[3])
self.global_avg_pool = nn.Sequential(
nn.AdaptiveAvgPool2D((1, 1)),
- nn.Conv2D(inplanes, 128, 1, stride=1, bias_attr=False), nn.ReLU())
+ nn.Conv2D(
+ inplanes, 128, 1, stride=1, bias_attr=False),
+ nn.ReLU())
self.GCT = GCT(640)
self.conv1 = nn.Conv2D(640, 256, 1, bias_attr=False)
@@ -229,10 +220,8 @@ def forward(self, x):
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
- x5 = F.interpolate(x5,
- size=x4.shape[2:],
- mode='bilinear',
- align_corners=True)
+ x5 = F.interpolate(
+ x5, size=x4.shape[2:], mode='bilinear', align_corners=True)
x = paddle.concat([x1, x2, x3, x4, x5], axis=1)
x = self.GCT(x)
@@ -254,14 +243,13 @@ def _init_weight(self):
@HEADS.register()
class CollaborativeEnsemblerMS(nn.Layer):
def __init__(
- self,
- model_semantic_embedding_dim=256,
- model_multi_local_distance=[[4, 8, 12, 16, 20, 24],
- [2, 4, 6, 8, 10, 12], [2, 4, 6, 8, 10]],
- model_head_embedding_dim=256,
- model_refine_channels=64,
- model_low_level_inplanes=256,
- ):
+ self,
+ model_semantic_embedding_dim=256,
+ model_multi_local_distance=[[4, 8, 12, 16, 20, 24],
+ [2, 4, 6, 8, 10, 12], [2, 4, 6, 8, 10]],
+ model_head_embedding_dim=256,
+ model_refine_channels=64,
+ model_low_level_inplanes=256, ):
super(CollaborativeEnsemblerMS, self).__init__()
in_dim_4x = model_semantic_embedding_dim * 3 + 3 + 2 * len(
model_multi_local_distance[0])
@@ -313,28 +301,28 @@ def __init__(
# Decoder
self.GCT_sc = GCT(low_level_dim + embed_dim)
- self.conv_sc = nn.Conv2D(low_level_dim + embed_dim,
- refine_dim,
- 1,
- bias_attr=False)
- self.bn_sc = nn.GroupNorm(num_groups=int(refine_dim / 4),
- num_channels=refine_dim)
+ self.conv_sc = nn.Conv2D(
+ low_level_dim + embed_dim, refine_dim, 1, bias_attr=False)
+ self.bn_sc = nn.GroupNorm(
+ num_groups=int(refine_dim / 4), num_channels=refine_dim)
self.relu = nn.ReLU()
self.IA10 = IA_gate(IA_in_dim, embed_dim + refine_dim)
- self.conv1 = nn.Conv2D(embed_dim + refine_dim,
- int(embed_dim / 2),
- kernel_size=3,
- padding=1,
- bias_attr=False)
+ self.conv1 = nn.Conv2D(
+ embed_dim + refine_dim,
+ int(embed_dim / 2),
+ kernel_size=3,
+ padding=1,
+ bias_attr=False)
self.bn1 = nn.GroupNorm(num_groups=32, num_channels=int(embed_dim / 2))
self.IA11 = IA_gate(IA_in_dim, int(embed_dim / 2))
- self.conv2 = nn.Conv2D(int(embed_dim / 2),
- int(embed_dim / 2),
- kernel_size=3,
- padding=1,
- bias_attr=False)
+ self.conv2 = nn.Conv2D(
+ int(embed_dim / 2),
+ int(embed_dim / 2),
+ kernel_size=3,
+ padding=1,
+ bias_attr=False)
self.bn2 = nn.GroupNorm(num_groups=32, num_channels=int(embed_dim / 2))
# Output
@@ -405,14 +393,16 @@ def IA_logit(self, x, IA_head, IA_final):
IA_bias = paddle.reshape(IA_bias, [-1])
logit = paddle.reshape(
- F.conv2d(x, weight=IA_weight, bias=IA_bias, groups=n), [n, 1, h, w])
+ F.conv2d(
+ x, weight=IA_weight, bias=IA_bias, groups=n), [n, 1, h, w])
return logit
def decoder(self, x, low_level_feat, IA_head):
- x = F.interpolate(x,
- size=low_level_feat.shape[2:],
- mode='bicubic',
- align_corners=True)
+ x = F.interpolate(
+ x,
+ size=low_level_feat.shape[2:],
+ mode='bicubic',
+ align_corners=True)
low_level_feat = self.GCT_sc(low_level_feat)
low_level_feat = self.conv_sc(low_level_feat)
@@ -440,8 +430,8 @@ def augment_background_logit(self, fg_logit, bg_logit):
if obj_num > 1:
bg_logit = bg_logit[1:obj_num, :, :, :]
aug_bg_logit = paddle.min(bg_logit, axis=0, keepdim=True)
- pad = paddle.expand(paddle.zeros(aug_bg_logit.shape),
- [obj_num - 1, -1, -1, -1])
+ pad = paddle.expand(
+ paddle.zeros(aug_bg_logit.shape), [obj_num - 1, -1, -1, -1])
aug_bg_logit = paddle.concat([aug_bg_logit, pad], axis=0)
pred = pred + aug_bg_logit
pred = paddle.transpose(pred, [1, 0, 2, 3])
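The l2 branch of GCT at the top of this file is just two normalizations: an L2 channel embedding scaled by `alpha`, then a cross-channel divisive norm gated by `gamma`. A standalone numeric sketch of those two lines, with the learnable parameters replaced by ones:

```python
import paddle

x = paddle.randn([2, 8, 16, 16])
epsilon = 1e-5
alpha = paddle.ones([1, 8, 1, 1])   # stands in for the learnable scale
gamma = paddle.ones([1, 8, 1, 1])   # stands in for the learnable gate

embedding = paddle.pow(paddle.sum(
    paddle.pow(x, 2), axis=[2, 3], keepdim=True) + epsilon,
    0.5) * alpha                    # [2, 8, 1, 1]
norm = gamma / paddle.pow((paddle.mean(
    paddle.pow(embedding, 2), axis=1, keepdim=True) + epsilon),
    0.5)                            # [2, 8, 1, 1] after broadcast
print(embedding.shape, norm.shape)
```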
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/i3d_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/i3d_head.py
old mode 100644
new mode 100755
index 269c8184e..5df0abadf
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/i3d_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/i3d_head.py
@@ -36,6 +36,7 @@ class I3DHead(BaseHead):
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
+
def __init__(self,
num_classes,
in_channels,
@@ -58,8 +59,7 @@ def __init__(self,
self.in_channels,
self.num_classes,
weight_attr=ParamAttr(learning_rate=10.0),
- bias_attr=ParamAttr(learning_rate=10.0),
- )
+ bias_attr=ParamAttr(learning_rate=10.0), )
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/ops.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/ops.py
old mode 100644
new mode 100755
index 0c357fa70..de7ee9f13
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/ops.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/ops.py
@@ -244,10 +244,13 @@ def roi_align(input,
if in_dygraph_mode():
assert rois_num is not None, "rois_num should not be None in dygraph mode."
if chaj_debug:
- print("chajchaj, ops.py, bf core.ops.roi_align, type(rois):",type(rois))
- print("chajchaj, ops.py, bf core.ops.roi_align, rois.shape:",rois.shape)
- if rois.shape[0]>0:
- print("chajchaj, ops.py, bf core.ops.roi_align, (rois):",(rois))
+ print("chajchaj, ops.py, bf core.ops.roi_align, type(rois):",
+ type(rois))
+ print("chajchaj, ops.py, bf core.ops.roi_align, rois.shape:",
+ rois.shape)
+ if rois.shape[0] > 0:
+ print("chajchaj, ops.py, bf core.ops.roi_align, (rois):",
+ (rois))
align_out = core.ops.roi_align(
input, rois, rois_num, "pooled_height", pooled_height,
"pooled_width", pooled_width, "spatial_scale", spatial_scale,
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptimesformer_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptimesformer_head.py
old mode 100644
new mode 100755
index 113bde8b5..695ad9dcb
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptimesformer_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptimesformer_head.py
@@ -33,6 +33,7 @@ class ppTimeSformerHead(BaseHead):
kwargs (dict, optional): Any keyword argument to initialize.
"""
+
def __init__(self,
num_classes,
in_channels,
@@ -42,19 +43,21 @@ def __init__(self,
super().__init__(num_classes, in_channels, loss_cfg, **kwargs)
self.std = std
- self.fc = Linear(self.in_channels,
- self.num_classes,
- bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
+ self.fc = Linear(
+ self.in_channels,
+ self.num_classes,
+ bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
def init_weights(self):
"""Initiate the FC layer parameters"""
- weight_init_(self.fc,
- 'TruncatedNormal',
- 'fc_0.w_0',
- 'fc_0.b_0',
- mean=0.0,
- std=self.std)
+ weight_init_(
+ self.fc,
+ 'TruncatedNormal',
+ 'fc_0.w_0',
+ 'fc_0.b_0',
+ mean=0.0,
+ std=self.std)
# NOTE: Temporarily use trunc_normal_ instead of TruncatedNormal
trunc_normal_(self.fc.weight, std=self.std)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptsm_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptsm_head.py
old mode 100644
new mode 100755
index 88ad2a8e5..0cff6dc8a
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptsm_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptsm_head.py
@@ -33,6 +33,7 @@ class ppTSMHead(TSNHead):
         std(float): Std(Scale) value in normal initializer. Default: 0.001.
kwargs (dict, optional): Any keyword argument to initialize.
"""
+
def __init__(self,
num_classes,
in_channels,
@@ -41,19 +42,21 @@ def __init__(self,
data_format="NCHW",
**kwargs):
- super().__init__(num_classes,
- in_channels,
- drop_ratio=drop_ratio,
- std=std,
- data_format=data_format,
- **kwargs)
+ super().__init__(
+ num_classes,
+ in_channels,
+ drop_ratio=drop_ratio,
+ std=std,
+ data_format=data_format,
+ **kwargs)
- self.fc = Linear(self.in_channels,
- self.num_classes,
- weight_attr=ParamAttr(learning_rate=5.0,
- regularizer=L2Decay(1e-4)),
- bias_attr=ParamAttr(learning_rate=10.0,
- regularizer=L2Decay(0.0)))
+ self.fc = Linear(
+ self.in_channels,
+ self.num_classes,
+ weight_attr=ParamAttr(
+ learning_rate=5.0, regularizer=L2Decay(1e-4)),
+ bias_attr=ParamAttr(
+ learning_rate=10.0, regularizer=L2Decay(0.0)))
self.stdv = std
def init_weights(self):
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptsn_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptsn_head.py
old mode 100644
new mode 100755
index 44314ac7d..35deac3bb
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptsn_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/pptsn_head.py
@@ -36,6 +36,7 @@ class ppTSNHead(BaseHead):
kwargs (dict, optional): Any keyword argument to initialize.
"""
+
def __init__(self,
num_classes,
in_channels,
@@ -60,19 +61,17 @@ def __init__(self,
self.fc = Linear(
self.in_channels,
self.num_classes,
- weight_attr=ParamAttr(learning_rate=5.0 if fclr5 else 1.0,
- regularizer=L2Decay(1e-4)),
- bias_attr=ParamAttr(learning_rate=10.0 if fclr5 else 1.0,
- regularizer=L2Decay(0.0)))
+ weight_attr=ParamAttr(
+ learning_rate=5.0
+ if fclr5 else 1.0, regularizer=L2Decay(1e-4)),
+ bias_attr=ParamAttr(
+ learning_rate=10.0
+ if fclr5 else 1.0, regularizer=L2Decay(0.0)))
def init_weights(self):
"""Initiate the FC layer parameters"""
- weight_init_(self.fc,
- 'Normal',
- 'fc_0.w_0',
- 'fc_0.b_0',
- mean=0.,
- std=self.std)
+ weight_init_(
+ self.fc, 'Normal', 'fc_0.w_0', 'fc_0.b_0', mean=0., std=self.std)
def forward(self, x, num_seg):
"""Define how the head is going to run.
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/roi_extractor.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/roi_extractor.py
old mode 100644
new mode 100755
index 2a6b93bea..d26d12b1a
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/roi_extractor.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/roi_extractor.py
@@ -18,7 +18,6 @@
#@register
class RoIAlign(object):
-
def __init__(self,
resolution=14,
spatial_scale=0.0625,
@@ -35,21 +34,23 @@ def __call__(self, feats, roi, rois_num):
rois_num = paddle.to_tensor(rois_num, dtype='int32')
rois_num = paddle.cast(rois_num, dtype='int32')
if len(feats) == 1:
- roi_feat = ops.roi_align(feats,
- roi,
- self.resolution,
- self.spatial_scale,
- sampling_ratio=self.sampling_ratio,
- rois_num=rois_num,
- aligned=self.aligned)
+ roi_feat = ops.roi_align(
+ feats,
+ roi,
+ self.resolution,
+ self.spatial_scale,
+ sampling_ratio=self.sampling_ratio,
+ rois_num=rois_num,
+ aligned=self.aligned)
else:
rois_feat_list = []
- roi_feat = ops.roi_align(feats,
- roi,
- self.resolution,
- self.spatial_scale,
- sampling_ratio=self.sampling_ratio,
- rois_num=rois_num,
- aligned=self.aligned)
+ roi_feat = ops.roi_align(
+ feats,
+ roi,
+ self.resolution,
+ self.spatial_scale,
+ sampling_ratio=self.sampling_ratio,
+ rois_num=rois_num,
+ aligned=self.aligned)
return roi_feat
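
Note that both branches of `__call__` above issue the identical `ops.roi_align` call, and `rois_feat_list` in the `else` branch is never used, so the wrapper reduces to a single call. A hedged usage sketch with the parameter names taken from this hunk; the input shapes, box values, and constructor defaults are illustrative assumptions:

```python
import paddle

extractor = RoIAlign(resolution=14, spatial_scale=0.0625, sampling_ratio=0)
feats = paddle.randn([2, 256, 64, 64])          # [N, C, H, W] feature map
roi = paddle.to_tensor([[0., 0., 160., 160.],
                        [32., 32., 320., 320.],
                        [16., 16., 96., 96.]])  # boxes in input-image coordinates
rois_num = [2, 1]                               # RoIs per image; cast to int32 inside
roi_feat = extractor(feats, roi, rois_num)      # -> [3, 256, 14, 14]
```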
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/roi_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/roi_head.py
old mode 100644
new mode 100755
index d399f84de..22f204245
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/roi_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/roi_head.py
@@ -57,14 +57,14 @@ def bbox2result(bboxes, labels, num_classes, img_shape, thr=0.01):
result.append(
             # For bboxes from step 1 whose score exceeds the threshold (may be empty), append the bbox and its score for this class to the result list.
- paddle.concat((bboxes_select, scores_select), axis=1))
+ paddle.concat(
+ (bboxes_select, scores_select), axis=1))
return result
@HEADS.register()
class AVARoIHead(nn.Layer):
-
def __init__(self,
assigner,
sampler,
@@ -101,9 +101,8 @@ def _bbox_forward(self, x, rois, rois_num):
cls_score, bbox_pred = self.bbox_head(
bbox_feat, rois, rois_num
         )  # handle the case where an roi's width or height is 0, which breaks roi_align
- bbox_results = dict(cls_score=cls_score,
- bbox_pred=bbox_pred,
- bbox_feats=bbox_feat)
+ bbox_results = dict(
+ cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feat)
return bbox_results
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels):
@@ -122,13 +121,10 @@ def train_step(self, x, img_metas, proposal_list, gt_bboxes, gt_labels):
num_imgs = len(img_metas[0])
sampling_results = []
for i in range(num_imgs):
- assign_result = self.bbox_assigner.assign(proposal_list[i],
- gt_bboxes[i],
- gt_labels[i])
- sampling_result = self.bbox_sampler.sample(assign_result,
- proposal_list[i],
- gt_bboxes[i],
- gt_labels[i])
+ assign_result = self.bbox_assigner.assign(
+ proposal_list[i], gt_bboxes[i], gt_labels[i])
+ sampling_result = self.bbox_sampler.sample(
+ assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i])
sampling_results.append(sampling_result)
#2. forward and loss
@@ -143,11 +139,8 @@ def simple_test(self, x, proposal_list, img_shape, rescale=False):
x_shape = x[0].shape
#assert x_shape[0] == 1, 'only accept 1 sample at test mode'
- det_bboxes, det_labels = self.simple_test_bboxes(x,
- img_shape,
- proposal_list,
- self.action_thr,
- rescale=rescale)
+ det_bboxes, det_labels = self.simple_test_bboxes(
+ x, img_shape, proposal_list, self.action_thr, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes, img_shape,
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/single_straight3d.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/single_straight3d.py
old mode 100644
new mode 100755
index cf8569dd4..8b4a69d71
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/single_straight3d.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/single_straight3d.py
@@ -43,10 +43,11 @@ def __init__(self,
self.with_temporal_pool = with_temporal_pool
self.with_global = with_global
- self.roi_layer = RoIAlign(resolution=self.output_size,
- spatial_scale=self.spatial_scale,
- sampling_ratio=self.sampling_ratio,
- aligned=self.aligned)
+ self.roi_layer = RoIAlign(
+ resolution=self.output_size,
+ spatial_scale=self.spatial_scale,
+ sampling_ratio=self.sampling_ratio,
+ aligned=self.aligned)
def init_weights(self):
pass
@@ -71,8 +72,8 @@ def forward(self, feat, rois, rois_num):
index = paddle.to_tensor(data_index)
frame_feat = paddle.index_select(feat, index, axis=2)
- frame_feat = paddle.squeeze(frame_feat,
- axis=2) #axis=2,避免N=1时, 第一维度被删除.
+ frame_feat = paddle.squeeze(
+            frame_feat, axis=2)  # axis=2: avoid dropping the first dim when N=1
roi_feat = self.roi_layer(frame_feat, rois, rois_num)
roi_feats.append(roi_feat)
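
The `index_select` + `squeeze(axis=2)` pair above extracts one temporal slice from a 5-D video feature; passing `axis=2` explicitly matters because a bare `squeeze()` would also drop the batch dimension when N=1. A minimal sketch (shapes assumed):

```python
import paddle

feat = paddle.randn([1, 256, 8, 16, 16])               # [N, C, T, H, W], N == 1
index = paddle.to_tensor([4])                          # pick the centre frame
frame_feat = paddle.index_select(feat, index, axis=2)  # [1, 256, 1, 16, 16]
frame_feat = paddle.squeeze(frame_feat, axis=2)        # [1, 256, 16, 16], batch dim kept
```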
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/slowfast_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/slowfast_head.py
old mode 100644
new mode 100755
index bd18bafda..daaf0f011
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/slowfast_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/slowfast_head.py
@@ -30,6 +30,7 @@ class SlowFastHead(BaseHead):
when the input size is larger than 1x1x1. If the inputs are from multiple
different pathways, the inputs will be concatenated after pooling.
"""
+
def __init__(self,
width_per_group,
alpha,
@@ -84,39 +85,37 @@ def __init__(self,
],
]
- assert (len({len(self.pool_size), len(self.dim_in)
- }) == 1), "pathway dimensions are not consistent."
+ assert (len({len(self.pool_size), len(self.dim_in)}) == 1
+ ), "pathway dimensions are not consistent."
self.num_pathways = len(self.pool_size)
self.dropout = paddle.nn.Dropout(p=self.dropout_rate)
self.projection = paddle.nn.Linear(
in_features=sum(self.dim_in),
- out_features=self.num_classes,
- )
+ out_features=self.num_classes, )
def init_weights(self):
- weight_init_(self.projection,
- "Normal",
- bias_value=0.0,
- mean=0.0,
- std=0.01)
+ weight_init_(
+ self.projection, "Normal", bias_value=0.0, mean=0.0, std=0.01)
def forward(self, inputs):
- assert (len(inputs) == self.num_pathways
- ), "Input tensor does not contain {} pathway".format(
- self.num_pathways)
+ assert (
+ len(inputs) == self.num_pathways
+ ), "Input tensor does not contain {} pathway".format(self.num_pathways)
pool_out = []
for pathway in range(self.num_pathways):
if self.pool_size[pathway] is None:
- tmp_out = F.adaptive_avg_pool3d(x=inputs[pathway],
- output_size=(1, 1, 1),
- data_format="NCDHW")
+ tmp_out = F.adaptive_avg_pool3d(
+ x=inputs[pathway],
+ output_size=(1, 1, 1),
+ data_format="NCDHW")
else:
- tmp_out = F.avg_pool3d(x=inputs[pathway],
- kernel_size=self.pool_size[pathway],
- stride=1,
- data_format="NCDHW")
+ tmp_out = F.avg_pool3d(
+ x=inputs[pathway],
+ kernel_size=self.pool_size[pathway],
+ stride=1,
+ data_format="NCDHW")
pool_out.append(tmp_out)
x = paddle.concat(x=pool_out, axis=1)
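
The pathway loop above pools each SlowFast input down to a single spatio-temporal position and concatenates along channels before the classifier. A reduced sketch of that data flow (the pathway channel counts are assumptions for illustration):

```python
import paddle
import paddle.nn.functional as F

slow = paddle.randn([2, 2048, 8, 7, 7])   # [N, C, T, H, W], slow pathway
fast = paddle.randn([2, 256, 32, 7, 7])   # fast pathway: more frames, fewer channels
pool_out = [
    F.adaptive_avg_pool3d(x=p, output_size=(1, 1, 1), data_format="NCDHW")
    for p in (slow, fast)
]
x = paddle.concat(x=pool_out, axis=1)     # [2, 2048 + 256, 1, 1, 1]
```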
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/stgcn_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/stgcn_head.py
old mode 100644
new mode 100755
index fc80d6633..370190c8e
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/stgcn_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/stgcn_head.py
@@ -28,11 +28,11 @@ class STGCNHead(BaseHead):
in_channels: int, input feature channels. Default: 256.
         num_classes: int, number of classes. Default: 10.
"""
+
def __init__(self, in_channels=256, num_classes=10, **kwargs):
super().__init__(num_classes, in_channels, **kwargs)
- self.fcn = nn.Conv2D(in_channels=in_channels,
- out_channels=num_classes,
- kernel_size=1)
+ self.fcn = nn.Conv2D(
+ in_channels=in_channels, out_channels=num_classes, kernel_size=1)
def init_weights(self):
"""Initiate the parameters.
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/timesformer_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/timesformer_head.py
old mode 100644
new mode 100755
index d02a3cca8..c80032df4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/timesformer_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/timesformer_head.py
@@ -31,6 +31,7 @@ class TimeSformerHead(BaseHead):
kwargs (dict, optional): Any keyword argument to initialize.
"""
+
def __init__(self,
num_classes,
in_channels,
@@ -45,12 +46,13 @@ def __init__(self,
def init_weights(self):
"""Initiate the FC layer parameters"""
- weight_init_(self.fc,
- 'TruncatedNormal',
- 'fc_0.w_0',
- 'fc_0.b_0',
- mean=0.0,
- std=self.std)
+ weight_init_(
+ self.fc,
+ 'TruncatedNormal',
+ 'fc_0.w_0',
+ 'fc_0.b_0',
+ mean=0.0,
+ std=self.std)
# NOTE: Temporarily use trunc_normal_ instead of TruncatedNormal
trunc_normal_(self.fc.weight, std=self.std)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/transnetv2_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/transnetv2_head.py
old mode 100644
new mode 100755
index 2ea67d4d3..90c2c52d6
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/transnetv2_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/transnetv2_head.py
@@ -17,21 +17,24 @@
from ..losses import TransNetV2Loss
from ...metrics.transnetv2_metric import create_scene_based_summaries
+
@HEADS.register()
class TransNetV2Head(BaseHead):
"""TransNetV2 Head.
"""
+
def __init__(self,
num_classes,
in_channels,
- loss_cfg=dict(name="TransNetV2Loss")
- ):
- super().__init__(num_classes,
- in_channels,
- loss_cfg)
+ loss_cfg=dict(name="TransNetV2Loss")):
+ super().__init__(num_classes, in_channels, loss_cfg)
- def loss(self, one_hot_pred, one_hot_gt,
- many_hot_pred=None, many_hot_gt=None, reg_losses=None):
+ def loss(self,
+ one_hot_pred,
+ one_hot_gt,
+ many_hot_pred=None,
+ many_hot_gt=None,
+ reg_losses=None):
losses = dict()
         loss = self.loss_func(one_hot_pred, one_hot_gt, many_hot_pred,
                               many_hot_gt, reg_losses)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/tsm_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/tsm_head.py
old mode 100644
new mode 100755
index 955930168..835ef068b
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/tsm_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/tsm_head.py
@@ -36,6 +36,7 @@ class TSMHead(TSNHead):
         std(float): Std(Scale) value in normal initializer. Default: 0.001.
kwargs (dict, optional): Any keyword argument to initialize.
"""
+
def __init__(self,
num_classes,
in_channels,
@@ -43,23 +44,25 @@ def __init__(self,
std=0.001,
data_format="NCHW",
**kwargs):
- super().__init__(num_classes,
- in_channels,
- drop_ratio=drop_ratio,
- std=std,
- data_format=data_format,
- **kwargs)
-
- self.fc = Linear(self.in_channels,
- self.num_classes,
- weight_attr=ParamAttr(learning_rate=5.0,
- regularizer=L2Decay(1e-4)),
- bias_attr=ParamAttr(learning_rate=10.0,
- regularizer=L2Decay(0.0)))
-
- assert (data_format in [
- 'NCHW', 'NHWC'
- ]), f"data_format must be 'NCHW' or 'NHWC', but got {data_format}"
+ super().__init__(
+ num_classes,
+ in_channels,
+ drop_ratio=drop_ratio,
+ std=std,
+ data_format=data_format,
+ **kwargs)
+
+ self.fc = Linear(
+ self.in_channels,
+ self.num_classes,
+ weight_attr=ParamAttr(
+ learning_rate=5.0, regularizer=L2Decay(1e-4)),
+ bias_attr=ParamAttr(
+ learning_rate=10.0, regularizer=L2Decay(0.0)))
+
+ assert (
+ data_format in ['NCHW', 'NHWC']
+ ), f"data_format must be 'NCHW' or 'NHWC', but got {data_format}"
self.data_format = data_format
@@ -93,7 +96,7 @@ def forward(self, x, num_seg):
score = paddle.reshape(
score, [-1, num_seg, score.shape[1]]) # [N, num_seg, num_class]
score = paddle.mean(score, axis=1) # [N, num_class]
- score = paddle.reshape(score,
- shape=[-1, self.num_classes]) # [N, num_class]
+ score = paddle.reshape(
+ score, shape=[-1, self.num_classes]) # [N, num_class]
# score = F.softmax(score) #NOTE remove
return score
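
The reshape/mean pair above is the TSM segment consensus: per-segment class scores are folded back to `[N, num_seg, num_class]` and averaged over segments (the final reshape in the hunk is a shape-preserving no-op kept from the source). A standalone sketch with assumed sizes:

```python
import paddle

num_seg, num_classes = 8, 400
score = paddle.randn([2 * num_seg, num_classes])           # [N * num_seg, num_class]
score = paddle.reshape(score, [-1, num_seg, num_classes])  # [N, num_seg, num_class]
score = paddle.mean(score, axis=1)                         # [N, num_class]
print(score.shape)                                         # [2, 400]
```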
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/tsn_head.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/tsn_head.py
old mode 100644
new mode 100755
index f2f906bce..290de3c9c
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/tsn_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/tsn_head.py
@@ -33,6 +33,7 @@ class TSNHead(BaseHead):
kwargs (dict, optional): Any keyword argument to initialize.
"""
+
def __init__(self,
num_classes,
in_channels,
@@ -59,12 +60,8 @@ def __init__(self,
def init_weights(self):
"""Initiate the FC layer parameters"""
- weight_init_(self.fc,
- 'Normal',
- 'fc_0.w_0',
- 'fc_0.b_0',
- mean=0.,
- std=self.std)
+ weight_init_(
+ self.fc, 'Normal', 'fc_0.w_0', 'fc_0.b_0', mean=0., std=self.std)
def forward(self, x, num_seg):
"""Define how the head is going to run.
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/actbert_loss.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/actbert_loss.py
old mode 100644
new mode 100755
index 10ffea6e6..aab90e7a5
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/actbert_loss.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/actbert_loss.py
@@ -25,6 +25,7 @@
class ActBertLoss(BaseWeightedLoss):
"""Loss for ActBert model
"""
+
def __init__(self, vocab_size=30522, a_target_size=700):
super().__init__()
self.vocab_size = vocab_size
@@ -47,11 +48,13 @@ def forward(self, prediction_scores_t, prediction_scores_v, prediction_scores_a,
1:] #8,37,1601 --> 8,36,1601
img_loss = self.vis_criterion(
- F.log_softmax(prediction_scores_v, axis=2),
+ F.log_softmax(
+ prediction_scores_v, axis=2),
image_target #8,36,1601
)
masked_img_loss = paddle.sum(
- img_loss * (image_label == 1).unsqueeze(2).astype('float32')) / max(
+ img_loss *
+ (image_label == 1).unsqueeze(2).astype('float32')) / max(
paddle.sum((image_label == 1).astype('float32')), 1e-6)
masked_text_loss = self.loss_fct(
@@ -70,6 +73,6 @@ def forward(self, prediction_scores_t, prediction_scores_v, prediction_scores_a,
)
total_loss = masked_text_loss.unsqueeze(0) + masked_img_loss.unsqueeze(
- 0) + masked_action_loss.unsqueeze(0) + next_sentence_loss.unsqueeze(
- 0)
+ 0) + masked_action_loss.unsqueeze(
+ 0) + next_sentence_loss.unsqueeze(0)
return total_loss
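
The masked-image term above averages the per-position loss only over positions whose `image_label == 1`, with `max(..., 1e-6)` guarding against a batch with no masked positions. The normalization in isolation (shapes follow the `8,36,1601` comments in the hunk; the loss values here are random placeholders):

```python
import paddle

img_loss = paddle.rand([8, 36, 1601])                      # per-position loss
image_label = paddle.randint(-1, 2, [8, 36])               # 1 marks masked positions
mask = (image_label == 1).unsqueeze(2).astype('float32')   # [8, 36, 1]
masked_img_loss = paddle.sum(img_loss * mask) / max(
    paddle.sum((image_label == 1).astype('float32')), 1e-6)
print(masked_img_loss)
```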
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/base.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/base.py
old mode 100644
new mode 100755
index 7284252e6..c818f0b05
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/base.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/base.py
@@ -12,10 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from abc import abstractmethod
+from abc import abstractmethod
import paddle
import paddle.nn as nn
+
#XXX use _forward?? or forward??
class BaseWeightedLoss(nn.Layer):
"""Base class for loss.
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/bmn_loss.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/bmn_loss.py
old mode 100644
new mode 100755
index 90b8e4397..36a0f80d4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/bmn_loss.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/bmn_loss.py
@@ -27,6 +27,7 @@ class BMNLoss(BaseWeightedLoss):
tscale (int): sequence length, default 100.
dscale (int): max duration length, default 100.
"""
+
def __init__(self, dscale, tscale, datatype='float32'):
super().__init__()
self.dscale = dscale
@@ -59,8 +60,8 @@ def bi_loss(pred_score, gt_label, datatype):
temp = paddle.log(pred_score + epsilon)
loss_pos = paddle.multiply(paddle.log(pred_score + epsilon), pmask)
loss_pos = coef_1 * paddle.mean(loss_pos)
- loss_neg = paddle.multiply(paddle.log(1.0 - pred_score + epsilon),
- (1.0 - pmask))
+ loss_neg = paddle.multiply(
+ paddle.log(1.0 - pred_score + epsilon), (1.0 - pmask))
loss_neg = coef_0 * paddle.mean(loss_neg)
loss = -1 * (loss_pos + loss_neg)
return loss
@@ -126,24 +127,20 @@ def pem_cls_loss_func(self, pred_score, gt_iou_map, mask):
epsilon = 0.000001
loss_pos = paddle.multiply(paddle.log(pred_score + epsilon), pmask)
loss_pos = coef_1 * paddle.sum(loss_pos)
- loss_neg = paddle.multiply(paddle.log(1.0 - pred_score + epsilon),
- nmask)
+ loss_neg = paddle.multiply(
+ paddle.log(1.0 - pred_score + epsilon), nmask)
loss_neg = coef_0 * paddle.sum(loss_neg)
loss = -1 * (loss_pos + loss_neg) / num_entries
return loss
def forward(self, pred_bm, pred_start, pred_end, gt_iou_map, gt_start,
gt_end):
- pred_bm_reg = paddle.squeeze(paddle.slice(pred_bm,
- axes=[1],
- starts=[0],
- ends=[1]),
- axis=[1])
- pred_bm_cls = paddle.squeeze(paddle.slice(pred_bm,
- axes=[1],
- starts=[1],
- ends=[2]),
- axis=[1])
+ pred_bm_reg = paddle.squeeze(
+ paddle.slice(
+ pred_bm, axes=[1], starts=[0], ends=[1]), axis=[1])
+ pred_bm_cls = paddle.squeeze(
+ paddle.slice(
+ pred_bm, axes=[1], starts=[1], ends=[2]), axis=[1])
bm_mask = self._get_mask(self.dscale, self.tscale)
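
`bi_loss` and `pem_cls_loss_func` above are the same class-balanced binary logistic loss: the log-likelihoods of positives and negatives are weighted by `coef_1` and `coef_0` before summing. A compact sketch; the coefficients are fixed here for illustration, whereas the full file derives them from the positive/negative frequencies:

```python
import paddle

def balanced_bce(pred_score, pmask, coef_0, coef_1, epsilon=1e-6):
    # pmask: 1.0 at positive positions, 0.0 at negatives.
    loss_pos = coef_1 * paddle.mean(paddle.log(pred_score + epsilon) * pmask)
    loss_neg = coef_0 * paddle.mean(
        paddle.log(1.0 - pred_score + epsilon) * (1.0 - pmask))
    return -1 * (loss_pos + loss_neg)

pred = paddle.uniform([100], min=0.01, max=0.99)
pmask = (paddle.uniform([100]) > 0.7).astype('float32')
print(balanced_bce(pred, pmask, coef_0=0.6, coef_1=2.5))
```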
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/cross_entropy_loss.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/cross_entropy_loss.py
old mode 100644
new mode 100755
index 953f77c07..325ee35b5
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/cross_entropy_loss.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/cross_entropy_loss.py
@@ -22,6 +22,7 @@
@LOSSES.register()
class CrossEntropyLoss(BaseWeightedLoss):
"""Cross Entropy Loss."""
+
def _forward(self, score, labels, **kwargs):
"""Forward function.
Args:
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/depth_loss.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/depth_loss.py
old mode 100644
new mode 100755
index ba9a2cb04..6660b65bd
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/depth_loss.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/depth_loss.py
@@ -26,12 +26,10 @@ def get_smooth_loss(disp, img):
grad_disp_x = paddle.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:])
grad_disp_y = paddle.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :])
- grad_img_x = paddle.mean(paddle.abs(img[:, :, :, :-1] - img[:, :, :, 1:]),
- 1,
- keepdim=True)
- grad_img_y = paddle.mean(paddle.abs(img[:, :, :-1, :] - img[:, :, 1:, :]),
- 1,
- keepdim=True)
+ grad_img_x = paddle.mean(
+ paddle.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)
+ grad_img_y = paddle.mean(
+ paddle.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)
grad_disp_x *= paddle.exp(-grad_img_x)
grad_disp_y *= paddle.exp(-grad_img_y)
@@ -54,9 +52,9 @@ def forward(self, input1, input2):
diff_loss = 0
dim = input1.shape[1]
for i in range(input1.shape[0]):
- diff_loss = diff_loss + paddle.mean(
- ((input1_l2[i:i + 1, :].mm(input2_l2[i:i + 1, :].T)).pow(2)) /
- dim)
+ diff_loss = diff_loss + paddle.mean((
+ (input1_l2[i:i + 1, :].mm(input2_l2[i:i + 1, :].T)).pow(2)) /
+ dim)
diff_loss = diff_loss / input1.shape[0]
@@ -90,6 +88,7 @@ def forward(self, pred, real):
class SSIM(nn.Layer):
"""Layer to compute the SSIM loss between a pair of images
"""
+
def __init__(self):
super(SSIM, self).__init__()
self.mu_x_pool = nn.AvgPool2D(3, 1, exclusive=False)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/transnetv2_loss.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/transnetv2_loss.py
old mode 100644
new mode 100755
index 624c46852..d6cc3bb6a
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/transnetv2_loss.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/transnetv2_loss.py
@@ -22,19 +22,25 @@
class TransNetV2Loss(BaseWeightedLoss):
"""Loss for TransNetV2 model
"""
+
def __init__(self, transition_weight=5.0, many_hot_loss_weight=0.1):
self.transition_weight = transition_weight
self.many_hot_loss_weight = many_hot_loss_weight
super().__init__()
- def _forward(self, one_hot_pred, one_hot_gt,
- many_hot_pred=None, many_hot_gt=None, reg_losses=None):
+ def _forward(self,
+ one_hot_pred,
+ one_hot_gt,
+ many_hot_pred=None,
+ many_hot_gt=None,
+ reg_losses=None):
        assert self.transition_weight != 1
one_hot_pred = one_hot_pred[:, :, 0]
one_hot_gt = one_hot_gt.astype('float32')
- one_hot_loss = F.binary_cross_entropy_with_logits(logit=one_hot_pred, label=one_hot_gt, reduction='none')
+ one_hot_loss = F.binary_cross_entropy_with_logits(
+ logit=one_hot_pred, label=one_hot_gt, reduction='none')
        one_hot_loss *= 1 + one_hot_gt * (self.transition_weight - 1)
@@ -43,8 +49,10 @@ def _forward(self, one_hot_pred, one_hot_gt,
many_hot_loss = 0.
        if self.many_hot_loss_weight != 0. and many_hot_pred is not None:
            many_hot_loss = self.many_hot_loss_weight * paddle.mean(
- F.binary_cross_entropy_with_logits(logit=many_hot_pred[:, :, 0],
- label=many_hot_gt.astype('float32'), reduction='none'))
+ F.binary_cross_entropy_with_logits(
+ logit=many_hot_pred[:, :, 0],
+ label=many_hot_gt.astype('float32'),
+ reduction='none'))
total_loss = one_hot_loss + many_hot_loss
@@ -53,4 +61,4 @@ def _forward(self, one_hot_pred, one_hot_gt,
if value is not None:
total_loss += value
- return total_loss
\ No newline at end of file
+ return total_loss
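
The transition weighting in `_forward` above scales the per-frame BCE so that frames labelled as shot transitions (`gt == 1`) count `transition_weight` times as much as ordinary frames. The core of that computation in isolation (shapes assumed):

```python
import paddle
import paddle.nn.functional as F

transition_weight = 5.0
one_hot_pred = paddle.randn([4, 100])     # per-frame transition logits
one_hot_gt = paddle.randint(0, 2, [4, 100]).astype('float32')
loss = F.binary_cross_entropy_with_logits(
    logit=one_hot_pred, label=one_hot_gt, reduction='none')
# weight 1 where gt == 0, transition_weight where gt == 1
loss *= 1 + one_hot_gt * (transition_weight - 1)
print(paddle.mean(loss))
```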
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/registry.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/registry.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/samplers/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/samplers/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/samplers/random_sampler.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/samplers/random_sampler.py
old mode 100644
new mode 100755
index 37397eca5..fd7297be4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/samplers/random_sampler.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/samplers/random_sampler.py
@@ -15,6 +15,7 @@
import numpy as np
from ..registry import BBOX_SAMPLERS
+
class SamplingResult():
"""Bbox sampling result. """
@@ -22,17 +23,18 @@ def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
gt_flags):
self.pos_inds = pos_inds
self.neg_inds = neg_inds
- self.pos_bboxes = paddle.index_select(bboxes,pos_inds)
-
+ self.pos_bboxes = paddle.index_select(bboxes, pos_inds)
+
# neg_inds may be empty
- if neg_inds.shape[0]!=0:
- self.neg_bboxes = paddle.index_select(bboxes,neg_inds)
+ if neg_inds.shape[0] != 0:
+ self.neg_bboxes = paddle.index_select(bboxes, neg_inds)
else:
- self.neg_bboxes=None
-
- self.pos_is_gt = paddle.index_select(gt_flags,pos_inds)
+ self.neg_bboxes = None
+
+ self.pos_is_gt = paddle.index_select(gt_flags, pos_inds)
self.num_gts = gt_bboxes.shape[0]
- self.pos_assigned_gt_inds = paddle.index_select(assign_result.gt_inds,pos_inds) - 1
+ self.pos_assigned_gt_inds = paddle.index_select(assign_result.gt_inds,
+ pos_inds) - 1
if gt_bboxes.numel().numpy()[0] == 0:
assert self.pos_assigned_gt_inds.numel() == 0
@@ -41,10 +43,12 @@ def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
if len(gt_bboxes.shape) < 2:
gt_bboxes = gt_bboxes.view(-1, 4)
- self.pos_gt_bboxes = paddle.index_select(gt_bboxes, self.pos_assigned_gt_inds)
+ self.pos_gt_bboxes = paddle.index_select(gt_bboxes,
+ self.pos_assigned_gt_inds)
if assign_result.labels is not None:
- self.pos_gt_labels = paddle.index_select(assign_result.labels, pos_inds)
+ self.pos_gt_labels = paddle.index_select(assign_result.labels,
+ pos_inds)
else:
self.pos_gt_labels = None
@@ -58,7 +62,6 @@ def bboxes(self):
return ret
-
@BBOX_SAMPLERS.register()
class RandomSampler():
def __init__(self,
@@ -71,7 +74,7 @@ def __init__(self,
self.pos_fraction = pos_fraction
self.neg_pos_ub = neg_pos_ub
self.add_gt_as_proposals = add_gt_as_proposals
-
+
def sample(self,
assign_result,
bboxes,
@@ -97,7 +100,8 @@ def sample(self,
        #1. Get the number of positive samples and their inds
num_expected_pos = int(self.num * self.pos_fraction)
- pos_inds = self._sample_pos( assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
+ pos_inds = self._sample_pos(
+ assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
pos_inds = paddle.to_tensor(np.unique(pos_inds.numpy()))
        #2. Get the number of negative samples and their inds
@@ -111,12 +115,13 @@ def sample(self,
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
+
def random_choice(self, gallery, num):
"""Random select some elements from the gallery. """
assert len(gallery) >= num
perm = paddle.arange(gallery.numel())[:num]
- perm = paddle.randperm(gallery.numel())[:num]
+ perm = paddle.randperm(gallery.numel())[:num]
rand_inds = paddle.index_select(gallery, perm)
return rand_inds
@@ -129,7 +134,7 @@ def _sample_pos(self, assign_result, num_expected, **kwargs):
        # If pos_inds has fewer elements than num_expected (the max number of samples wanted),
        # use pos_inds directly; otherwise randomly sample num_expected indices from it.
if pos_inds.numel().numpy()[0] != 0:
- pos_inds = pos_inds.squeeze()
+ pos_inds = pos_inds.squeeze()
if pos_inds.numel().numpy()[0] <= num_expected:
return pos_inds
else:
@@ -139,7 +144,7 @@ def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Randomly sample some negative samples."""
neg_inds = paddle.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel().numpy()[0] != 0:
- neg_inds = neg_inds.squeeze()
+ neg_inds = neg_inds.squeeze()
if (neg_inds.numel().numpy()[0]) <= num_expected.numpy()[0]:
return neg_inds
else:
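
Both `_sample_pos` and `_sample_neg` above follow one pattern: gather candidate indices with `paddle.nonzero`, return them all if there are few enough, otherwise take a random permutation prefix via `random_choice`. A minimal sketch of that pattern:

```python
import paddle

def random_choice(gallery, num):
    # Select `num` random elements from a 1-D index tensor.
    assert len(gallery) >= num
    perm = paddle.randperm(gallery.shape[0])[:num]
    return paddle.index_select(gallery, perm)

gt_inds = paddle.to_tensor([0, 2, 0, 1, 0, 3, 0, 0])   # 0 marks negatives
neg_inds = paddle.nonzero(gt_inds == 0, as_tuple=False).squeeze()
print(random_choice(neg_inds, 3))   # 3 random negative indices
```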
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/weight_init.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/weight_init.py
old mode 100644
new mode 100755
index 472289526..ff835e57b
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/weight_init.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/weight_init.py
@@ -73,8 +73,8 @@ def norm_cdf(x):
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to [2l-1, 2u-1].
- tmp = np.random.uniform(2 * l - 1, 2 * u - 1,
- size=list(tensor.shape)).astype(np.float32)
+ tmp = np.random.uniform(
+ 2 * l - 1, 2 * u - 1, size=list(tensor.shape)).astype(np.float32)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
@@ -118,9 +118,8 @@ def _calculate_correct_fan(tensor, mode):
mode = mode.lower()
valid_modes = ['fan_in', 'fan_out']
if mode not in valid_modes:
- raise ValueError(
- "Mode {} not supported, please use one of {}".format(
- mode, valid_modes))
+ raise ValueError("Mode {} not supported, please use one of {}".
+ format(mode, valid_modes))
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
return fan_in if mode == 'fan_in' else fan_out
@@ -143,11 +142,12 @@ def calculate_gain(nonlinearity, param=None):
param, int) or isinstance(param, float):
negative_slope = param
else:
- raise ValueError(
- "negative_slope {} not a valid number".format(param))
+ raise ValueError("negative_slope {} not a valid number".format(
+ param))
return math.sqrt(2.0 / (1 + negative_slope**2))
else:
- raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
+ raise ValueError("Unsupported nonlinearity {}".format(
+ nonlinearity))
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain(nonlinearity, a)
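
The truncated-normal sampler above works in CDF space: draw uniformly between the CDF values of the truncation bounds, then map back through the inverse normal CDF (the file does this via an erfinv-based transform on `[2l-1, 2u-1]`). An equivalent pure-Python formulation of the same construction:

```python
import numpy as np
from statistics import NormalDist

def trunc_normal(shape, mean=0.0, std=1.0, a=-2.0, b=2.0):
    norm = NormalDist()
    l = norm.cdf((a - mean) / std)      # CDF value of the lower bound
    u = norm.cdf((b - mean) / std)      # CDF value of the upper bound
    cdf_samples = np.random.uniform(l, u, size=shape)
    # Inverse CDF maps the uniform samples back to a truncated standard normal.
    z = np.array([norm.inv_cdf(c)
                  for c in cdf_samples.ravel()]).reshape(shape)
    return z * std + mean

w = trunc_normal((3, 3), mean=0.0, std=0.02)  # every entry lies in mean ± 2*std
```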
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/custom_lr.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/custom_lr.py
old mode 100644
new mode 100755
index cd02c4e97..52906caa0
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/custom_lr.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/custom_lr.py
@@ -36,6 +36,7 @@ class CustomWarmupCosineDecay(LRScheduler):
Returns:
``CosineAnnealingDecay`` instance to schedule learning rate.
"""
+
def __init__(self,
warmup_start_lr,
warmup_epochs,
@@ -50,8 +51,8 @@ def __init__(self,
self.max_epoch = max_epoch
self.num_iters = num_iters
        # call step() in base class; last_lr/last_epoch/base_lr will be updated
- super(CustomWarmupCosineDecay, self).__init__(last_epoch=last_epoch,
- verbose=verbose)
+ super(CustomWarmupCosineDecay, self).__init__(
+ last_epoch=last_epoch, verbose=verbose)
def step(self, epoch=None):
"""
@@ -76,8 +77,8 @@ def step(self, epoch=None):
self.last_epoch, self.__class__.__name__, self.last_lr))
def _lr_func_cosine(self, cur_epoch, cosine_base_lr, max_epoch):
- return cosine_base_lr * (math.cos(math.pi * cur_epoch / max_epoch) +
- 1.0) * 0.5
+ return cosine_base_lr * (
+ math.cos(math.pi * cur_epoch / max_epoch) + 1.0) * 0.5
def get_lr(self):
"""Define lr policy"""
@@ -109,6 +110,7 @@ class CustomWarmupPiecewiseDecay(LRScheduler):
Returns:
``CustomWarmupPiecewiseDecay`` instance to schedule learning rate.
"""
+
def __init__(self,
warmup_start_lr,
warmup_epochs,
@@ -177,15 +179,13 @@ def get_lr(self):
self.lrs,
self.step_base_lr,
self.steps,
- self.max_epoch,
- )
+ self.max_epoch, )
lr_end = self._lr_func_steps_with_relative_lrs(
self.warmup_epochs,
self.lrs,
self.step_base_lr,
self.steps,
- self.max_epoch,
- )
+ self.max_epoch, )
# Perform warm up.
if self.last_epoch < self.warmup_epochs:
@@ -251,8 +251,8 @@ def get_warmup_lr(self, cur_iters):
def step(self, epoch=None):
self.regular_lr = self.get_regular_lr()
self.last_lr = self.get_lr()
- self.cnt_epoch = (self.cnt_iters +
- 1) // self.num_iters # update step with iters
+ self.cnt_epoch = (
+ self.cnt_iters + 1) // self.num_iters # update step with iters
self.cnt_iters += 1
if self.verbose:
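
`_lr_func_cosine` above is half-cosine annealing from `cosine_base_lr` to 0 over `max_epoch`; during the first `warmup_epochs` the scheduler blends in a warmup ramp. A standalone sketch of the schedule, where the warmup rule is assumed to be linear from `warmup_start_lr` to the cosine value at the end of warmup, as the parameter names suggest:

```python
import math

def warmup_cosine_lr(cur_epoch, warmup_start_lr, warmup_epochs,
                     cosine_base_lr, max_epoch):
    cosine = lambda e: cosine_base_lr * (
        math.cos(math.pi * e / max_epoch) + 1.0) * 0.5
    if cur_epoch < warmup_epochs:
        alpha = (cosine(warmup_epochs) - warmup_start_lr) / warmup_epochs
        return warmup_start_lr + alpha * cur_epoch
    return cosine(cur_epoch)

print([round(warmup_cosine_lr(e, 0.01, 5, 0.1, 50), 4) for e in range(0, 50, 10)])
```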
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/lr.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/lr.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/optimizer.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/optimizer.py
old mode 100644
new mode 100755
index ff97edf9b..f7e44a0ed
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/optimizer.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/solver/optimizer.py
@@ -64,12 +64,12 @@ def build_optimizer(cfg, lr_scheduler, model=None):
if isinstance(cfg_copy.get('weight_decay'),
float): # just an float factor
cfg_copy['weight_decay'] = cfg_copy.get('weight_decay')
- elif 'L1' in cfg_copy.get('weight_decay').get(
- 'name').upper(): # specify L2 wd and it's float factor
+ elif 'L1' in cfg_copy.get('weight_decay').get('name').upper(
+        ): # specify L1 wd and its float factor
cfg_copy['weight_decay'] = L1Decay(
cfg_copy.get('weight_decay').get('value'))
- elif 'L2' in cfg_copy.get('weight_decay').get(
- 'name').upper(): # specify L1 wd and it's float factor
+ elif 'L2' in cfg_copy.get('weight_decay').get('name').upper(
+        ): # specify L2 wd and its float factor
cfg_copy['weight_decay'] = L2Decay(
cfg_copy.get('weight_decay').get('value'))
else:
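
`build_optimizer` above accepts `weight_decay` either as a bare float or as a dict naming an `L1Decay`/`L2Decay` regularizer with a `value` (the two inline comments were swapped in the source and are corrected above). The dispatch in isolation:

```python
from paddle.regularizer import L1Decay, L2Decay

def parse_weight_decay(wd_cfg):
    if isinstance(wd_cfg, float):      # just a float factor
        return wd_cfg
    name = wd_cfg['name'].upper()
    if 'L1' in name:
        return L1Decay(wd_cfg['value'])
    if 'L2' in name:
        return L2Decay(wd_cfg['value'])
    raise ValueError("unsupported weight_decay config: {}".format(wd_cfg))

print(parse_weight_decay({'name': 'L2Decay', 'value': 1e-4}))
```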
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/test.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/test.py
old mode 100644
new mode 100755
index 5b4d89cf0..25313bb93
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/test.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/test.py
@@ -53,15 +53,16 @@ def test_model(cfg, weights, parallel=True):
# default num worker: 0, which means no subprocess will be created
num_workers = cfg.DATASET.get('num_workers', 0)
num_workers = cfg.DATASET.get('test_num_workers', num_workers)
- dataloader_setting = dict(batch_size=batch_size,
- num_workers=num_workers,
- places=places,
- drop_last=False,
- shuffle=False)
+ dataloader_setting = dict(
+ batch_size=batch_size,
+ num_workers=num_workers,
+ places=places,
+ drop_last=False,
+ shuffle=False)
data_loader = build_dataloader(
- dataset, **dataloader_setting) if cfg.model_name not in ['CFBI'
- ] else dataset
+ dataset,
+ **dataloader_setting) if cfg.model_name not in ['CFBI'] else dataset
model.eval()
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train.py
old mode 100644
new mode 100755
index 92852e51e..583864fa4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train.py
@@ -57,8 +57,8 @@ def train_model(cfg,
use_gradient_accumulation = cfg.get('GRADIENT_ACCUMULATION', None)
if use_gradient_accumulation and dist.get_world_size() >= 1:
- global_batch_size = cfg.GRADIENT_ACCUMULATION.get(
- 'global_batch_size', None)
+ global_batch_size = cfg.GRADIENT_ACCUMULATION.get('global_batch_size',
+ None)
num_gpus = dist.get_world_size()
assert isinstance(
@@ -102,10 +102,11 @@ def train_model(cfg,
# 2. Construct dataset and dataloader
train_dataset = build_dataset((cfg.DATASET.train, cfg.PIPELINE.train))
- train_dataloader_setting = dict(batch_size=batch_size,
- num_workers=num_workers,
- collate_fn_cfg=cfg.get('MIX', None),
- places=places)
+ train_dataloader_setting = dict(
+ batch_size=batch_size,
+ num_workers=num_workers,
+ collate_fn_cfg=cfg.get('MIX', None),
+ places=places)
train_loader = build_dataloader(train_dataset, **train_dataloader_setting)
@@ -147,9 +148,10 @@ def train_model(cfg,
# 4. Train Model
###AMP###
if amp:
- scaler = paddle.amp.GradScaler(init_loss_scaling=2.0**16,
- incr_every_n_steps=2000,
- decr_every_n_nan_or_inf=1)
+ scaler = paddle.amp.GradScaler(
+ init_loss_scaling=2.0**16,
+ incr_every_n_steps=2000,
+ decr_every_n_nan_or_inf=1)
best = 0.0
for epoch in range(0, cfg.epochs):
@@ -275,7 +277,8 @@ def evaluate(best):
if i % cfg.get("log_interval", 10) == 0:
ips = "ips: {:.5f} instance/sec.".format(
valid_batch_size / record_list["batch_time"].val)
- log_batch(record_list, i, epoch + 1, cfg.epochs, "val", ips)
+ log_batch(record_list, i, epoch + 1, cfg.epochs, "val",
+ ips)
if cfg.MODEL.framework == "FastRCNN":
if parallel:
@@ -291,8 +294,8 @@ def evaluate(best):
log_epoch(record_list, epoch + 1, "val", ips)
best_flag = False
- if cfg.MODEL.framework == "FastRCNN" and (not parallel or
- (parallel and rank == 0)):
+ if cfg.MODEL.framework == "FastRCNN" and (
+ not parallel or (parallel and rank == 0)):
if record_list["mAP@0.5IOU"].val > best:
best = record_list["mAP@0.5IOU"].val
best_flag = True
@@ -312,15 +315,16 @@ def evaluate(best):
return best, best_flag
# use precise bn to improve acc
- if cfg.get("PRECISEBN") and (epoch % cfg.PRECISEBN.preciseBN_interval
- == 0 or epoch == cfg.epochs - 1):
+ if cfg.get("PRECISEBN") and (
+ epoch % cfg.PRECISEBN.preciseBN_interval == 0 or
+ epoch == cfg.epochs - 1):
do_preciseBN(
model, train_loader, parallel,
min(cfg.PRECISEBN.num_iters_preciseBN, len(train_loader)))
# 5. Validation
- if validate and (epoch % cfg.get("val_interval", 1) == 0
- or epoch == cfg.epochs - 1):
+ if validate and (epoch % cfg.get("val_interval", 1) == 0 or
+ epoch == cfg.epochs - 1):
with paddle.no_grad():
best, save_best_flag = evaluate(best)
# save best
@@ -347,13 +351,11 @@ def evaluate(best):
# 6. Save model and optimizer
if epoch % cfg.get("save_interval", 1) == 0 or epoch == cfg.epochs - 1:
- save(
- optimizer.state_dict(),
- osp.join(output_dir,
- model_name + f"_epoch_{epoch+1:05d}.pdopt"))
- save(
- model.state_dict(),
- osp.join(output_dir,
- model_name + f"_epoch_{epoch+1:05d}.pdparams"))
+ save(optimizer.state_dict(),
+ osp.join(output_dir,
+ model_name + f"_epoch_{epoch+1:05d}.pdopt"))
+ save(model.state_dict(),
+ osp.join(output_dir,
+ model_name + f"_epoch_{epoch+1:05d}.pdparams"))
logger.info(f'training {model_name} finished')
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train_dali.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train_dali.py
old mode 100644
new mode 100755
index 5017091a3..7276d1cb6
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train_dali.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train_dali.py
@@ -55,9 +55,8 @@ def train_dali(cfg, weights=None, parallel=True):
# 3. Construct solver.
lr = build_lr(cfg.OPTIMIZER.learning_rate, None)
- optimizer = build_optimizer(cfg.OPTIMIZER,
- lr,
- parameter_list=model.parameters())
+ optimizer = build_optimizer(
+ cfg.OPTIMIZER, lr, parameter_list=model.parameters())
# Resume
resume_epoch = cfg.get("resume_epoch", 0)
@@ -125,21 +124,20 @@ def train_dali(cfg, weights=None, parallel=True):
log_epoch(record_list, epoch + 1, "train", ips)
# use precise bn to improve acc
- if cfg.get("PRECISEBN") and (epoch % cfg.PRECISEBN.preciseBN_interval
- == 0 or epoch == cfg.epochs - 1):
+ if cfg.get("PRECISEBN") and (
+ epoch % cfg.PRECISEBN.preciseBN_interval == 0 or
+ epoch == cfg.epochs - 1):
do_preciseBN(
model, train_loader, parallel,
min(cfg.PRECISEBN.num_iters_preciseBN, len(train_loader)))
# 5. Save model and optimizer
if epoch % cfg.get("save_interval", 1) == 0 or epoch == cfg.epochs - 1:
- save(
- optimizer.state_dict(),
- osp.join(output_dir,
- model_name + f"_epoch_{epoch+1:05d}.pdopt"))
- save(
- model.state_dict(),
- osp.join(output_dir,
- model_name + f"_epoch_{epoch+1:05d}.pdparams"))
+ save(optimizer.state_dict(),
+ osp.join(output_dir,
+ model_name + f"_epoch_{epoch+1:05d}.pdopt"))
+ save(model.state_dict(),
+ osp.join(output_dir,
+ model_name + f"_epoch_{epoch+1:05d}.pdparams"))
logger.info(f'training {model_name} finished')
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train_multigrid.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train_multigrid.py
old mode 100644
new mode 100755
index 5ae76fe5d..82d287213
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train_multigrid.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/tasks/train_multigrid.py
@@ -35,8 +35,7 @@ def construct_loader(cfg, places, validate, precise_bn, num_iters_precise_bn,
precise_bn_dataloader_setting = dict(
batch_size=batch_size,
num_workers=cfg.DATASET.get('num_workers', 0),
- places=places,
- )
+ places=places, )
if precise_bn:
cfg.DATASET.train.num_samples_precise_bn = num_iters_precise_bn * batch_size * world_size
precise_bn_dataset = build_dataset(
@@ -51,10 +50,9 @@ def construct_loader(cfg, places, validate, precise_bn, num_iters_precise_bn,
# get batch size list in short cycle schedule
bs_factor = [
int(
- round((float(
- cfg.PIPELINE.train.transform[1]['MultiCrop']['target_size'])
- / (s * cfg.MULTIGRID.default_crop_size))**2))
- for s in cfg.MULTIGRID.short_cycle_factors
+ round((float(cfg.PIPELINE.train.transform[1]['MultiCrop'][
+ 'target_size']) / (s * cfg.MULTIGRID.default_crop_size))**
+ 2)) for s in cfg.MULTIGRID.short_cycle_factors
]
batch_sizes = [
batch_size * bs_factor[0],
@@ -65,20 +63,19 @@ def construct_loader(cfg, places, validate, precise_bn, num_iters_precise_bn,
batch_size=batch_sizes,
multigrid=True,
num_workers=cfg.DATASET.get('num_workers', 0),
- places=places,
- )
+ places=places, )
else:
train_dataloader_setting = precise_bn_dataloader_setting
train_loader = build_dataloader(train_dataset, **train_dataloader_setting)
if validate:
valid_dataset = build_dataset((cfg.DATASET.valid, cfg.PIPELINE.valid))
- validate_dataloader_setting = dict(batch_size=batch_size,
- num_workers=cfg.DATASET.get(
- 'num_workers', 0),
- places=places,
- drop_last=False,
- shuffle=False)
+ validate_dataloader_setting = dict(
+ batch_size=batch_size,
+ num_workers=cfg.DATASET.get('num_workers', 0),
+ places=places,
+ drop_last=False,
+ shuffle=False)
valid_loader = build_dataloader(valid_dataset,
**validate_dataloader_setting)
else:
@@ -116,9 +113,8 @@ def build_trainer(cfg, places, parallel, validate, precise_bn,
)
lr = build_lr(cfg.OPTIMIZER.learning_rate, len(train_loader))
- optimizer = build_optimizer(cfg.OPTIMIZER,
- lr,
- parameter_list=model.parameters())
+ optimizer = build_optimizer(
+ cfg.OPTIMIZER, lr, parameter_list=model.parameters())
return (
model,
@@ -126,8 +122,7 @@ def build_trainer(cfg, places, parallel, validate, precise_bn,
optimizer,
train_loader,
valid_loader,
- precise_bn_loader,
- )
+ precise_bn_loader, )
def train_model_multigrid(cfg, world_size=1, validate=True):
@@ -156,7 +151,7 @@ def train_model_multigrid(cfg, world_size=1, validate=True):
places = paddle.set_device('npu')
else:
places = paddle.set_device('gpu')
-
+
model_name = cfg.model_name
output_dir = cfg.get("output_dir", f"./output/{model_name}")
mkdir(output_dir)
@@ -181,9 +176,8 @@ def train_model_multigrid(cfg, world_size=1, validate=True):
# 3. Construct optimizer
lr = build_lr(cfg.OPTIMIZER.learning_rate, len(train_loader))
- optimizer = build_optimizer(cfg.OPTIMIZER,
- lr,
- parameter_list=model.parameters())
+ optimizer = build_optimizer(
+ cfg.OPTIMIZER, lr, parameter_list=model.parameters())
# Resume
resume_epoch = cfg.get("resume_epoch", 0)
@@ -213,9 +207,9 @@ def train_model_multigrid(cfg, world_size=1, validate=True):
optimizer,
train_loader,
valid_loader,
- precise_bn_loader,
- ) = build_trainer(cfg, places, parallel, validate, precise_bn,
- num_iters_precise_bn, world_size)
+ precise_bn_loader, ) = build_trainer(
+ cfg, places, parallel, validate, precise_bn,
+ num_iters_precise_bn, world_size)
#load checkpoint after re-build model
if epoch != 0:
@@ -327,8 +321,8 @@ def evaluate(best):
# 6. Save model and optimizer
if is_eval_epoch(
- cfg, epoch,
- total_epochs, multigrid.schedule) or epoch % cfg.get(
+ cfg, epoch, total_epochs,
+ multigrid.schedule) or epoch % cfg.get(
"save_interval", 10) == 0 or epoch in multi_save_epoch:
logger.info("[Save parameters] ======")
subn_save(output_dir, model_name + str(local_rank) + '_', epoch + 1,
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/build_utils.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/build_utils.py
old mode 100644
new mode 100755
index 73c0ca46b..a1c12711b
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/build_utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/build_utils.py
@@ -30,6 +30,6 @@ def build(cfg, registry, key='name'):
obj_cls = registry.get(obj_type)
if obj_cls is None:
- raise KeyError('{} is not in the {} registry'.format(
- obj_type, registry.name))
+ raise KeyError('{} is not in the {} registry'.format(obj_type,
+ registry.name))
return obj_cls(**cfg_copy)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/config.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/config.py
old mode 100644
new mode 100755
index f4d794116..2ef4c3725
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/config.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/config.py
@@ -63,8 +63,8 @@ def print_dict(d, delimiter=0):
placeholder = "-" * 60
for k, v in sorted(d.items()):
if isinstance(v, dict):
- logger.info("{}{} : ".format(delimiter * " ", coloring(k,
- "HEADER")))
+ logger.info("{}{} : ".format(delimiter * " ",
+ coloring(k, "HEADER")))
print_dict(v, delimiter + 4)
elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict):
logger.info("{}{} : ".format(delimiter * " ",
@@ -104,6 +104,7 @@ def override(dl, ks, v):
ks(list): list of keys
v(str): value to be replaced
"""
+
def str2num(v):
try:
return eval(v)
@@ -127,8 +128,8 @@ def str2num(v):
dl[ks[0]] = str2num(v)
else:
assert ks[0] in dl, (
- '({}) doesn\'t exist in {}, a new dict field is invalid'.format(
- ks[0], dl))
+ '({}) doesn\'t exist in {}, a new dict field is invalid'.
+ format(ks[0], dl))
override(dl[ks[0]], ks[1:], v)
@@ -165,7 +166,8 @@ def get_config(fname, overrides=None, show=True):
"""
Read config from file
"""
- assert os.path.exists(fname), ('config file({}) is not exist'.format(fname))
+ assert os.path.exists(fname), (
+        'config file({}) does not exist'.format(fname))
config = parse_config(fname)
override_config(config, overrides)
if show:
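
`override` above walks a nested config dict along a list of keys and writes the leaf through `str2num`, which evals numeric strings and falls back to the raw string. A dict-only sketch (the full helper also indexes into lists):

```python
def str2num(v):
    try:
        return eval(v)          # '0.05' -> 0.05, '[1, 2]' -> [1, 2]
    except Exception:
        return v                # keep plain strings as-is

def override(dl, ks, v):
    if len(ks) == 1:
        assert ks[0] in dl, "({}) doesn't exist in {}".format(ks[0], dl)
        dl[ks[0]] = str2num(v)
    else:
        override(dl[ks[0]], ks[1:], v)

cfg = {'OPTIMIZER': {'learning_rate': {'base_lr': 0.1}}}
override(cfg, ['OPTIMIZER', 'learning_rate', 'base_lr'], '0.05')
print(cfg)  # {'OPTIMIZER': {'learning_rate': {'base_lr': 0.05}}}
```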
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/dist_utils.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/dist_utils.py
old mode 100644
new mode 100755
index 7659e88c1..1a99fe866
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/dist_utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/dist_utils.py
@@ -16,15 +16,18 @@
import paddle
import paddle.distributed as dist
+
def get_dist_info():
world_size = dist.get_world_size()
rank = dist.get_rank()
return rank, world_size
+
def main_only(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
+
return wrapper
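
`main_only` above makes any function a no-op on every rank except 0, the usual guard for logging and checkpointing under data parallelism. Usage sketch:

```python
import functools
import paddle.distributed as dist

def main_only(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if dist.get_rank() == 0:          # only the main process runs func
            return func(*args, **kwargs)
    return wrapper

@main_only
def save_checkpoint(path):
    print("saving to", path)              # executed once, not world_size times

save_checkpoint("./output/model.pdparams")
```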
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/logger.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/logger.py
old mode 100644
new mode 100755
index e9791b89b..a9e68f3b4
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/logger.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/logger.py
@@ -19,8 +19,6 @@
from paddle.distributed import ParallelEnv
-
-
Color = {
'RED': '\033[31m',
'HEADER': '\033[35m', # deep purple
@@ -55,15 +53,17 @@ def setup_logger(output=None, name="paddlevideo", level="INFO"):
Returns:
logging.Logger: a logger
"""
+
def time_zone(sec, fmt):
real_time = datetime.datetime.now()
return real_time.timetuple()
+
logging.Formatter.converter = time_zone
logger = logging.getLogger(name)
if level == "INFO":
logger.setLevel(logging.INFO)
- elif level=="DEBUG":
+ elif level == "DEBUG":
logger.setLevel(logging.DEBUG)
logger.propagate = False
@@ -73,8 +73,7 @@ def time_zone(sec, fmt):
datefmt="%m/%d %H:%M:%S")
else:
plain_formatter = logging.Formatter(
- "[%(asctime)s] %(message)s",
- datefmt="%m/%d %H:%M:%S")
+ "[%(asctime)s] %(message)s", datefmt="%m/%d %H:%M:%S")
# stdout logging: master only
local_rank = ParallelEnv().local_rank
if local_rank == 0:
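The handler setup that follows in this file attaches stdout logging only on the master process. A minimal sketch of that pattern, assuming the same ParallelEnv API imported above:

import logging
import sys
from paddle.distributed import ParallelEnv

logger = logging.getLogger('paddlevideo')
logger.setLevel(logging.INFO)
logger.propagate = False
if ParallelEnv().local_rank == 0:  # only rank 0 writes to stdout
    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setFormatter(logging.Formatter(
        '[%(asctime)s] %(message)s', datefmt='%m/%d %H:%M:%S'))
    logger.addHandler(ch)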
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/batchnorm_helper.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/batchnorm_helper.py
old mode 100644
new mode 100755
index e39b067d8..4aed1b234
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/batchnorm_helper.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/batchnorm_helper.py
@@ -15,8 +15,8 @@ def get_norm(bn_norm_type, bn_num_splits):
elif bn_norm_type == "sub_batchnorm":
return partial(SubBatchNorm3D, num_splits=bn_num_splits)
else:
- raise NotImplementedError(
- "Norm type {} is not supported".format(bn_norm_type))
+ raise NotImplementedError("Norm type {} is not supported".format(
+ bn_norm_type))
def aggregate_sub_bn_stats(model):
@@ -47,6 +47,7 @@ class SubBatchNorm3D(paddle.nn.Layer):
examples (1/N of batch) independently. During evaluation, it aggregates
the stats from all splits into one BN.
"""
+
def __init__(self, num_splits, **args):
"""
Args:
@@ -75,14 +76,12 @@ def __init__(self, num_splits, **args):
and self.weight_attr.learning_rate == 0.
if self.bias_attr == False:
- self.bias = self.create_parameter(attr=None,
- shape=[self.num_features],
- is_bias=True)
+ self.bias = self.create_parameter(
+ attr=None, shape=[self.num_features], is_bias=True)
self.bias.stop_gradient = True
else:
- self.bias = self.create_parameter(attr=self.bias_attr,
- shape=[self.num_features],
- is_bias=True)
+ self.bias = self.create_parameter(
+ attr=self.bias_attr, shape=[self.num_features], is_bias=True)
self.bias.stop_gradient = self.bias_attr is not None \
and self.bias_attr.learning_rate == 0.
@@ -123,8 +122,7 @@ def aggregate_stats(self):
bn_mean_tensor, bn_variance_tensor = self._get_aggregated_mean_std(
self.split_bn._mean,
self.split_bn._variance,
- self.num_splits,
- )
+ self.num_splits, )
self.bn._mean.set_value(bn_mean_tensor)
self.bn._variance.set_value(bn_variance_tensor)
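aggregate_stats folds the per-split statistics back into the single BN used at evaluation time. A numpy sketch of the arithmetic, assuming _get_aggregated_mean_std uses the standard formulas (global mean = mean of the split means; global variance = mean of the split variances plus the spread of the split means):

import numpy as np

def aggregate_mean_std(split_mean, split_var, num_splits):
    m = split_mean.reshape(num_splits, -1).mean(axis=0)
    v = (split_var.reshape(num_splits, -1)
         + (split_mean.reshape(num_splits, -1) - m) ** 2).mean(axis=0)
    return m, v

split_mean = np.array([1.0, 3.0])  # one channel seen by two splits
split_var = np.array([0.5, 0.5])
m, v = aggregate_mean_std(split_mean, split_var, num_splits=2)
# m == [2.0], v == [1.5]: the 0.5 average variance plus the 1.0 spread of the means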
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/interval_helper.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/interval_helper.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/multigrid.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/multigrid.py
old mode 100644
new mode 100755
index a296a0608..e579ec26e
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/multigrid.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/multigrid.py
@@ -7,6 +7,7 @@ class MultigridSchedule(object):
"""
This class defines multigrid training schedule and update cfg accordingly.
"""
+
def init_multigrid(self, cfg):
"""
Update cfg based on multigrid settings.
@@ -68,8 +69,8 @@ def update_long_cycle(self, cfg, cur_epoch):
cfg (configs): the updated cfg.
changed (bool): whether to change long cycle shape at this epoch
"""
- base_b, base_t, base_s = get_current_long_cycle_shape(
- self.schedule, cur_epoch)
+ base_b, base_t, base_s = get_current_long_cycle_shape(self.schedule,
+ cur_epoch)
if base_s != cfg.PIPELINE.train.transform[1]['MultiCrop'][
'target_size'] or base_t != cfg.PIPELINE.train.decode_sampler.num_frames:
#NOTE Modify
@@ -77,7 +78,8 @@ def update_long_cycle(self, cfg, cur_epoch):
# cfg.MODEL.head.num_frames = base_t
# cfg.MODEL.head.crop_size = base_s
cfg.PIPELINE.train.decode_sampler.num_frames = base_t
- cfg.PIPELINE.train.transform[1]['MultiCrop']['target_size'] = base_s
+ cfg.PIPELINE.train.transform[1]['MultiCrop'][
+ 'target_size'] = base_s
cfg.DATASET.batch_size = base_b * cfg.MULTIGRID.default_batch_size #change bs
bs_factor = (float(cfg.DATASET.batch_size) /
@@ -122,8 +124,8 @@ def get_long_cycle_schedule(self, cfg):
default_size = float(
cfg.PIPELINE.train.decode_sampler.num_frames *
- cfg.PIPELINE.train.transform[1]['MultiCrop']['target_size']**
- 2) # 32 * 224 * 224 C*H*W
+ cfg.PIPELINE.train.transform[1]['MultiCrop']['target_size']
+ **2) # 32 * 224 * 224 C*H*W
default_iters = steps[-1] # 196
# Get shapes and average batch size for each long cycle shape.
@@ -135,9 +137,8 @@ def get_long_cycle_schedule(self, cfg):
base_t = int(
round(cfg.PIPELINE.train.decode_sampler.num_frames * t_factor))
base_s = int(
- round(
- cfg.PIPELINE.train.transform[1]['MultiCrop']['target_size']
- * s_factor))
+ round(cfg.PIPELINE.train.transform[1]['MultiCrop'][
+ 'target_size'] * s_factor))
if cfg.MULTIGRID.SHORT_CYCLE:
shapes = [
[
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/save_load_helper.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/save_load_helper.py
old mode 100644
new mode 100755
index 94a52d58b..d2fcdeb26
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/save_load_helper.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/save_load_helper.py
@@ -62,9 +62,9 @@ def normal_to_sub_bn(checkpoint_sd, model_sd):
model_blob_shape = model_sd[key].shape #bn.split_bn
c2_blob_shape = checkpoint_sd[key].shape #bn.bn
- if (len(model_blob_shape) == 1 and len(c2_blob_shape) == 1
- and model_blob_shape[0] > c2_blob_shape[0]
- and model_blob_shape[0] % c2_blob_shape[0] == 0):
+ if (len(model_blob_shape) == 1 and len(c2_blob_shape) == 1 and
+ model_blob_shape[0] > c2_blob_shape[0] and
+ model_blob_shape[0] % c2_blob_shape[0] == 0):
before_shape = checkpoint_sd[key].shape
checkpoint_sd[key] = np.concatenate(
[checkpoint_sd[key]] *
@@ -88,6 +88,7 @@ def mapping_opt_dict(opt_dict, model_key_list):
model_key_list: the parameters name list of re-build model.
Return: optimizer state dict with modified keys
"""
+
def get_name_info(PNAME, PN_key_list, key_list):
min_index = float('inf')
max_index = 0
@@ -157,12 +158,12 @@ def get_name_info(PNAME, PN_key_list, key_list):
for key in pd_key_list:
for name in PN_key_list[1:]:
if key.startswith(name):
- start = change_dict[name] if (
- change_name and "batch_norm" in name) else name
+ start = change_dict[name] if (change_name and
+ "batch_norm" in name) else name
str_index = key.split('.')[0].split(name)[-1]
index = int(str_index)
- new_index = str(index +
- (PNAME[start][1][0] - PNAME[name][0][0]))
+ new_index = str(index + (PNAME[start][1][0] - PNAME[name][0][0]
+ ))
end = key.split('.')[-1]
update_key = start + new_index + '.' + end
opt_dict[update_key] = opt_dict.pop(key)
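normal_to_sub_bn above adapts a standard-BN checkpoint to the split BN by tiling each 1-D statistic num_splits times when the model's tensor is an exact multiple of the pretrained one. A hedged sketch of that shape rule:

import numpy as np

def normal_to_sub_bn_param(ckpt_param, model_shape):
    if (ckpt_param.ndim == 1 and len(model_shape) == 1
            and model_shape[0] > ckpt_param.shape[0]
            and model_shape[0] % ckpt_param.shape[0] == 0):
        reps = model_shape[0] // ckpt_param.shape[0]
        return np.concatenate([ckpt_param] * reps)
    return ckpt_param

bn_mean = np.array([0.1, 0.2])                 # pretrained bn.bn._mean (C=2)
tiled = normal_to_sub_bn_param(bn_mean, [4])   # split_bn with num_splits=2
# tiled -> [0.1, 0.2, 0.1, 0.2]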
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/short_sampler.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/short_sampler.py
old mode 100644
new mode 100755
index 0004dace4..ef5553b03
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/short_sampler.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/multigrid/short_sampler.py
@@ -35,6 +35,7 @@ class DistributedShortSampler(BatchSampler):
drop_last(bool): whether drop the last incomplete batch dataset size
is not divisible by the batch size. Default False
"""
+
def __init__(self,
dataset,
batch_sizes,
@@ -78,8 +79,8 @@ def __init__(self,
def __iter__(self):
num_samples = len(self.dataset)
indices = np.arange(num_samples).tolist()
- indices += indices[:(self.total_size -
- len(indices))] #completion last iter
+ indices += indices[:(self.total_size - len(indices)
+ )]  # pad to complete the last iteration
assert len(indices) == self.total_size
if self.shuffle:
np.random.RandomState(self.epoch).shuffle(indices)
@@ -100,10 +101,9 @@ def _get_indices_by_batch_size(indices):
subsampled_indices.extend(indices[i:i + total_batch_size])
indices = indices[len(indices) - last_batch_size:]
- subsampled_indices.extend(
- indices[self.local_rank *
- last_local_batch_size:(self.local_rank + 1) *
- last_local_batch_size])
+ subsampled_indices.extend(indices[
+ self.local_rank * last_local_batch_size:(
+ self.local_rank + 1) * last_local_batch_size])
return subsampled_indices
if self.nranks > 1:
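__iter__ pads the index list so total_size divides evenly across ranks, then _get_indices_by_batch_size carves out this rank's share batch by batch. A simplified sketch with a single batch size (the real sampler accepts several, for multigrid's short cycles):

import numpy as np

def pad_and_shard(num_samples, total_size, nranks, local_rank, batch_size):
    indices = np.arange(num_samples).tolist()
    indices += indices[:total_size - len(indices)]  # reuse the head to fill the last iteration
    assert len(indices) == total_size
    total_batch = batch_size * nranks
    shard = []
    for i in range(0, len(indices), total_batch):
        chunk = indices[i:i + total_batch]
        shard.extend(chunk[local_rank * batch_size:(local_rank + 1) * batch_size])
    return shard

# 10 samples padded to 12, two ranks, batch_size 2:
# rank 0 -> [0, 1, 4, 5, 8, 9], rank 1 -> [2, 3, 6, 7, 0, 1]
print(pad_and_shard(10, 12, nranks=2, local_rank=0, batch_size=2))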
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/precise_bn.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/precise_bn.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/profiler.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/profiler.py
old mode 100644
new mode 100755
index 04201aa26..eddb19c37
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/profiler.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/profiler.py
@@ -43,6 +43,7 @@ class ProfilerOptions(object):
which can be used to generate a timeline.
exit_on_finished - a boolean.
"""
+
def __init__(self, options_str):
assert isinstance(options_str, str)
@@ -99,8 +100,8 @@ def add_profiler_step(options_str=None):
_profiler_options = ProfilerOptions(options_str)
if _profiler_step_id == _profiler_options['batch_range'][0]:
- paddle.utils.profiler.start_profiler(_profiler_options['state'],
- _profiler_options['tracer_option'])
+ paddle.utils.profiler.start_profiler(
+ _profiler_options['state'], _profiler_options['tracer_option'])
elif _profiler_step_id == _profiler_options['batch_range'][1]:
paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'],
_profiler_options['profile_path'])
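add_profiler_step is meant to be called once per training iteration; profiling starts and stops at the batch indices named in the options string. A hedged usage sketch, assuming the semicolon-separated key=value format that ProfilerOptions parses and the module path paddlevideo/utils/profiler.py:

from paddlevideo.utils.profiler import add_profiler_step  # path assumed from this file

# options format assumed from ProfilerOptions' parser
options = 'batch_range=[10,20];state=GPU;profile_path=./model.profile'
for batch_id in range(100):
    add_profiler_step(options)  # no-op outside batches 10..20
    # ... run one training step ...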
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/record.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/record.py
old mode 100644
new mode 100755
index edaed7cb1..4be99b669
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/record.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/record.py
@@ -26,7 +26,8 @@
def build_record(cfg):
record_list = [
("loss", AverageMeter('loss', '7.5f')),
- ("lr", AverageMeter('lr', 'f', need_avg=False)),
+ ("lr", AverageMeter(
+ 'lr', 'f', need_avg=False)),
]
if 'Recognizer1D' in cfg.framework: #TODO: required specify str in framework
record_list.append(("hit_at_one", AverageMeter("hit_at_one", '.5f')))
@@ -67,6 +68,7 @@ class AverageMeter(object):
"""
Computes and stores the average and current value
"""
+
def __init__(self, name='', fmt='f', need_avg=True):
self.name = name
self.fmt = fmt
@@ -95,8 +97,8 @@ def total(self):
@property
def total_minute(self):
- return '{self.name}_sum: {s:{self.fmt}} min'.format(s=self.sum / 60,
- self=self)
+ return '{self.name}_sum: {s:{self.fmt}} min'.format(
+ s=self.sum / 60, self=self)
@property
def mean(self):
@@ -122,8 +124,10 @@ def log_batch(metric_list, batch_id, epoch_id, total_epoch, mode, ips):
logger.info("{:s} {:s} {:s} {:s} {:s} {}".format(
coloring(epoch_str, "HEADER") if batch_id == 0 else epoch_str,
- coloring(step_str, "PURPLE"), coloring(metric_str, 'OKGREEN'),
- coloring(batch_cost, "OKGREEN"), coloring(reader_cost, 'OKGREEN'), ips))
+ coloring(step_str, "PURPLE"),
+ coloring(metric_str, 'OKGREEN'),
+ coloring(batch_cost, "OKGREEN"), coloring(reader_cost, 'OKGREEN'),
+ ips))
def log_epoch(metric_list, epoch, mode, ips):
@@ -140,6 +144,8 @@ def log_epoch(metric_list, epoch, mode, ips):
end_epoch_str = "END epoch:{:<3d}".format(epoch)
logger.info("{:s} {:s} {:s} {:s} {:s} {:s} {}".format(
- coloring(end_epoch_str, "RED"), coloring(mode, "PURPLE"),
- coloring(metric_str, "OKGREEN"), coloring(batch_cost, "OKGREEN"),
+ coloring(end_epoch_str, "RED"),
+ coloring(mode, "PURPLE"),
+ coloring(metric_str, "OKGREEN"),
+ coloring(batch_cost, "OKGREEN"),
coloring(reader_cost, "OKGREEN"), coloring(batch_sum, "OKGREEN"), ips))
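build_record wires one AverageMeter per metric; a usage sketch, assuming the conventional update(val, n) interface for weighted accumulation:

from paddlevideo.utils.record import AverageMeter  # path assumed from this file

loss_meter = AverageMeter('loss', '7.5f')
for step_loss, batch_size in [(0.9, 8), (0.7, 8), (0.5, 8)]:
    loss_meter.update(step_loss, batch_size)  # update(val, n) assumed
print(loss_meter.avg)  # 0.7; 'lr' uses need_avg=False and reports only the latest value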
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/registry.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/registry.py
old mode 100644
new mode 100755
index 81b76bd51..0add76eb6
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/registry.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/registry.py
@@ -40,6 +40,7 @@ class ResNet:
b = BACKBONES.get(backbone_name)()
"""
+
def __init__(self, name):
"""
Args:
@@ -89,8 +90,7 @@ def get(self, name):
"""
ret = self._obj_map.get(name)
if ret is None:
- raise KeyError(
- "No object named '{}' found in '{}' registry!".format(
- name, self._name))
+ raise KeyError("No object named '{}' found in '{}' registry!".
+ format(name, self._name))
return ret
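The registry maps names to classes so configs can instantiate components by string. A hedged usage sketch, assuming the decorator form shown in the class docstring and the module path paddlevideo/utils/registry.py:

from paddlevideo.utils.registry import Registry  # path assumed from this file

BACKBONES = Registry('backbone')

@BACKBONES.register()  # decorator form assumed from the class docstring
class ResNet:
    def __init__(self, depth=50):
        self.depth = depth

backbone = BACKBONES.get('ResNet')(depth=101)  # KeyError with a clear message if unregistered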
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/save_load.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/save_load.py
old mode 100644
new mode 100755
index 71465cbb5..566bb7ffc
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/save_load.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/utils/save_load.py
@@ -58,13 +58,13 @@ def pretrain_swin_param_trans(model, state_dicts):
k for k in state_dicts.keys() if "relative_position_bias_table" in k
]
total_len = len(relative_position_bias_table_keys)
- with tqdm(total=total_len,
- position=1,
- bar_format='{desc}',
- desc="Loading weights") as desc:
- for key in tqdm(relative_position_bias_table_keys,
- total=total_len,
- position=0):
+ with tqdm(
+ total=total_len,
+ position=1,
+ bar_format='{desc}',
+ desc="Loading weights") as desc:
+ for key in tqdm(
+ relative_position_bias_table_keys, total=total_len, position=0):
relative_position_bias_table_pretrained = state_dicts[key]
relative_position_bias_table_current = model.state_dict()[key]
L1, nH1 = relative_position_bias_table_pretrained.shape
@@ -108,13 +108,12 @@ def pretrain_vit_param_trans(model, state_dicts, num_patches, num_seg,
if num_patches + 1 != state_dicts['pos_embed'].shape[1]:
pos_embed = state_dicts['pos_embed']
cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
- other_pos_embed = pos_embed[0,
- 1:, :].unsqueeze(0).unsqueeze(1).transpose(
- (0, 1, 3, 2))
- new_pos_embed = F.interpolate(other_pos_embed,
- size=(other_pos_embed.shape[-2],
- num_patches),
- mode='nearest')
+ other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).unsqueeze(
+ 1).transpose((0, 1, 3, 2))
+ new_pos_embed = F.interpolate(
+ other_pos_embed,
+ size=(other_pos_embed.shape[-2], num_patches),
+ mode='nearest')
new_pos_embed = new_pos_embed.squeeze(0).transpose((0, 2, 1))
new_pos_embed = paddle.concat((cls_pos_embed, new_pos_embed), axis=1)
state_dicts['pos_embed'] = new_pos_embed
@@ -123,16 +122,16 @@ def pretrain_vit_param_trans(model, state_dicts, num_patches, num_seg,
if 'time_embed' in state_dicts and num_seg != state_dicts[
'time_embed'].shape[1]:
time_embed = state_dicts['time_embed'].transpose((0, 2, 1)).unsqueeze(0)
- new_time_embed = F.interpolate(time_embed,
- size=(time_embed.shape[-2], num_seg),
- mode='nearest')
+ new_time_embed = F.interpolate(
+ time_embed, size=(time_embed.shape[-2], num_seg), mode='nearest')
state_dicts['time_embed'] = new_time_embed.squeeze(0).transpose(
(0, 2, 1))
time.sleep(0.01)
- with tqdm(total=total_len,
- position=1,
- bar_format='{desc}',
- desc="Loading weights") as desc:
+ with tqdm(
+ total=total_len,
+ position=1,
+ bar_format='{desc}',
+ desc="Loading weights") as desc:
if attention_type == 'divided_space_time':
new_state_dicts = state_dicts.copy()
for key in tqdm(state_dicts):
@@ -164,13 +163,13 @@ def pretrain_resnet18_param_trans(model, loaded_dict):
names = ['encoder.', 'encoder_day.', 'encoder_night.']
for name in names:
total_len = len(loaded_dict.items())
- with tqdm(total=total_len,
- position=1,
- bar_format='{desc}',
- desc="Loading weights") as desc:
- for key, value in tqdm(loaded_dict.items(),
- total=total_len,
- position=0):
+ with tqdm(
+ total=total_len,
+ position=1,
+ bar_format='{desc}',
+ desc="Loading weights") as desc:
+ for key, value in tqdm(
+ loaded_dict.items(), total=total_len, position=0):
key = str(name + key)
if key in encoder_dict:
encoder_dict[key] = value
@@ -181,13 +180,13 @@ def pretrain_resnet18_param_trans(model, loaded_dict):
loaded_dict['conv1.weight'] = paddle.concat(
[loaded_dict['conv1.weight']] * num_input_images, 1) / num_input_images
total_len = len(loaded_dict.items())
- with tqdm(total=total_len,
- position=1,
- bar_format='{desc}',
- desc="Loading weights") as desc:
- for name, value in tqdm(loaded_dict.items(),
- total=total_len,
- position=0):
+ with tqdm(
+ total=total_len,
+ position=1,
+ bar_format='{desc}',
+ desc="Loading weights") as desc:
+ for name, value in tqdm(
+ loaded_dict.items(), total=total_len, position=0):
name = str('encoder.' + name)
if name in pose_encoder_dict:
pose_encoder_dict[name] = value
@@ -231,10 +230,11 @@ def load_ckpt(model, weight_path, **kargs):
else:
tmp = {}
total_len = len(model.state_dict())
- with tqdm(total=total_len,
- position=1,
- bar_format='{desc}',
- desc="Loading weights") as desc:
+ with tqdm(
+ total=total_len,
+ position=1,
+ bar_format='{desc}',
+ desc="Loading weights") as desc:
for item in tqdm(model.state_dict(), total=total_len, position=0):
name = item
desc.set_description('Loading %s' % name)
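pretrain_vit_param_trans above resizes pretrained positional embeddings when the fine-tune input has a different patch count: the cls row is split off, the remaining rows are nearest-neighbour interpolated to the new length, and the two are re-concatenated. A self-contained sketch with illustrative shapes:

import paddle
import paddle.nn.functional as F

pos_embed = paddle.randn([1, 197, 768])  # pretrained: 196 patches + cls token
num_patches = 392                        # target resolution (illustrative)
cls_pos = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)                   # [1, 1, 768]
other = pos_embed[0, 1:, :].unsqueeze(0).unsqueeze(1).transpose((0, 1, 3, 2))
new = F.interpolate(other, size=(other.shape[-2], num_patches), mode='nearest')
new = new.squeeze(0).transpose((0, 2, 1))                                # [1, 392, 768]
pos_embed = paddle.concat((cls_pos, new), axis=1)                        # [1, 393, 768]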
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/version.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/version.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/requirements.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/requirements.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/run.sh b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/run.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/setup.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/setup.py
old mode 100644
new mode 100755
index b5f3e85ff..b70d4a2d5
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/setup.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/setup.py
@@ -17,6 +17,7 @@
with open('requirements.txt', encoding="utf-8-sig") as f:
requirements = f.readlines()
+
def readme():
with open('docs/en/whl_en.md', encoding="utf-8-sig") as f:
README = f.read()
@@ -24,12 +25,14 @@ def readme():
setup(
- name='paddlevideo', #name of .whl file
- packages=['ppvideo'], #install package name
+ name='paddlevideo', #name of .whl file
+ packages=['ppvideo'], #install package name
package_dir={'ppvideo': ''},
- include_package_data=True, #Accept all data files and directories matched by MANIFEST.in
+ include_package_data=True, #Accept all data files and directories matched by MANIFEST.in
install_requires=requirements,
- entry_points={"console_scripts": ["ppvideo= ppvideo.tools.paddlevideo_clas:main"]},
+ entry_points={
+ "console_scripts": ["ppvideo= ppvideo.tools.paddlevideo_clas:main"]
+ },
version='0.0.1',
license='Apache License 2.0',
description='Awesome Video toolkits based on PaddlePaddle ',
@@ -38,10 +41,11 @@ def readme():
url='https://github.com/PaddlePaddle/PaddleVideo',
download_url='https://github.com/PaddlePaddle/PaddleVideo.git',
keywords=[
- 'A treasure chest for video understanding powered by PaddlePaddle.'
+ 'A treasure chest for video understanding powered by PaddlePaddle.'
],
classifiers=[
- 'Intended Audience :: Developers', 'Operating System :: OS Independent',
+ 'Intended Audience :: Developers',
+ 'Operating System :: OS Independent',
'Natural Language :: Chinese (Simplified)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
@@ -50,4 +54,4 @@ def readme():
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7', 'Topic :: Utilities'
- ],)
\ No newline at end of file
+ ], )
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/README.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/common_func.sh b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/common_func.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/compare_results.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/compare_results.py
old mode 100644
new mode 100755
index dd8308dc9..273387d64
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/compare_results.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/compare_results.py
@@ -23,10 +23,8 @@ def parse_args():
def run_shell_command(cmd):
- p = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True)
+ p = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
if p.returncode == 0:
@@ -128,10 +126,8 @@ def collect_predict_from_logs(log_path, key_list):
def testing_assert_allclose(dict_x, dict_y, atol=1e-7, rtol=1e-7):
for k in dict_x:
- np.testing.assert_allclose(np.array(dict_x[k]),
- np.array(dict_y[k]),
- atol=atol,
- rtol=rtol)
+ np.testing.assert_allclose(
+ np.array(dict_x[k]), np.array(dict_y[k]), atol=atol, rtol=rtol)
if __name__ == "__main__":
@@ -157,10 +153,8 @@ def testing_assert_allclose(dict_x, dict_y, atol=1e-7, rtol=1e-7):
continue
pred_dict = pred_collection[filename]
try:
- testing_assert_allclose(gt_dict,
- pred_dict,
- atol=args.atol,
- rtol=args.rtol)
+ testing_assert_allclose(
+ gt_dict, pred_dict, atol=args.atol, rtol=args.rtol)
print(
"Assert allclose passed! The results of {} and {} are consistent!"
.format(filename, gt_filename))
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/AGCN/AGCN_train_infer_python.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/AGCN/AGCN_train_infer_python.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/AttentionLSTM/AttentionLSTM_train_infer_python.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/AttentionLSTM/AttentionLSTM_train_infer_python.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/BMN/BMN_train_infer_python.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/BMN/BMN_train_infer_python.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/PP-TSM/PP-TSM_infer_cpp.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/PP-TSM/PP-TSM_infer_cpp.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/PP-TSM/PP-TSM_train_infer_python.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/PP-TSM/PP-TSM_train_infer_python.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/PP-TSN/PP-TSN_infer_cpp.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/PP-TSN/PP-TSN_infer_cpp.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/PP-TSN/PP-TSN_train_infer_python.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/PP-TSN/PP-TSN_train_infer_python.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/STGCN/STGCN_train_infer_python.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/STGCN/STGCN_train_infer_python.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/SlowFast/SlowFast_train_infer_python.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/SlowFast/SlowFast_train_infer_python.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/TSM/TSM_train_infer_python.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/TSM/TSM_train_infer_python.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/TSN/TSN_train_infer_python.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/TSN/TSN_train_infer_python.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/TimeSformer/TimeSformer_train_infer_python.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/configs/TimeSformer/TimeSformer_train_infer_python.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/Video_TIPC.png b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/Video_TIPC.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/guide.png b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/guide.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/install.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/install.md
old mode 100644
new mode 100755
index e9c32d7f5..07b2da985
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/install.md
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/install.md
@@ -124,4 +124,3 @@ source /etc/profile
export LD_LIBRARY_PATH=/xx/xx/TensorRT-7.0.0.11/lib:$LD_LIBRARY_PATH
```
Alternatively, the problem may be that the downloaded TensorRT version does not match the TRT version the current Paddle build was compiled against; download a matching TensorRT version and reinstall it.
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/test_inference_cpp.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/test_inference_cpp.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/test_train_inference_python.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/docs/test_train_inference_python.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/prepare.sh b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/prepare.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/AGCN/python_ppvideo_AGCN_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/AGCN/python_ppvideo_AGCN_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/AGCN/python_ppvideo_AGCN_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/AGCN/python_ppvideo_AGCN_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/AttentionLSTM/python_ppvideo_AttentionLSTM_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/AttentionLSTM/python_ppvideo_AttentionLSTM_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/AttentionLSTM/python_ppvideo_AttentionLSTM_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/AttentionLSTM/python_ppvideo_AttentionLSTM_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/BMN/python_ppvideo_BMN_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/BMN/python_ppvideo_BMN_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/BMN/python_ppvideo_BMN_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/BMN/python_ppvideo_BMN_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSM/python_ppvideo_PP-TSM_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSM/python_ppvideo_PP-TSM_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSM/python_ppvideo_PP-TSM_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSM/python_ppvideo_PP-TSM_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSM_CPP/cpp_ppvideo_PP-TSM_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSM_CPP/cpp_ppvideo_PP-TSM_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSM_CPP/cpp_ppvideo_PP-TSM_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSM_CPP/cpp_ppvideo_PP-TSM_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSN/python_ppvideo_PP-TSN_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSN/python_ppvideo_PP-TSN_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSN/python_ppvideo_PP-TSN_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSN/python_ppvideo_PP-TSN_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSN_CPP/cpp_ppvideo_PP-TSN_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSN_CPP/cpp_ppvideo_PP-TSN_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSN_CPP/cpp_ppvideo_PP-TSN_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/PP-TSN_CPP/cpp_ppvideo_PP-TSN_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/STGCN.txt/python_ppvideo_STGCN_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/STGCN.txt/python_ppvideo_STGCN_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/STGCN.txt/python_ppvideo_STGCN_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/STGCN.txt/python_ppvideo_STGCN_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/SlowFast/python_ppvideo_SlowFast_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/SlowFast/python_ppvideo_SlowFast_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/SlowFast/python_ppvideo_SlowFast_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/SlowFast/python_ppvideo_SlowFast_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/TSM/python_ppvideo_TSM_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/TSM/python_ppvideo_TSM_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/TSM/python_ppvideo_TSM_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/TSM/python_ppvideo_TSM_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/TSN/python_ppvideo_TSN_results_fp16.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/TSN/python_ppvideo_TSN_results_fp16.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/TSN/python_ppvideo_TSN_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/TSN/python_ppvideo_TSN_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/TimeSformer/python_ppvideo_TimeSformer_results_fp32.txt b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/results/TimeSformer/python_ppvideo_TimeSformer_results_fp32.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/test_inference_cpp.sh b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/test_inference_cpp.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/test_train_inference_python.sh b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/test_tipc/test_train_inference_python.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/__init__.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/__init__.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/ava_predict.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/ava_predict.py
old mode 100644
new mode 100755
index 4f617a32b..778f75c5a
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/ava_predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/ava_predict.py
@@ -159,55 +159,56 @@ def frame_extraction(video_path, target_dir):
def parse_args():
-
def str2bool(v):
return v.lower() in ("true", "t", "1")
# general params
parser = argparse.ArgumentParser("PaddleVideo Inference model script")
- parser.add_argument('-c',
- '--config',
- type=str,
- default='configs/example.yaml',
- help='config file path')
+ parser.add_argument(
+ '-c',
+ '--config',
+ type=str,
+ default='configs/example.yaml',
+ help='config file path')
parser.add_argument('--video_path', help='video file/url')
- parser.add_argument('-o',
- '--override',
- action='append',
- default=[],
- help='config options to be overridden')
- parser.add_argument('-w',
- '--weights',
- type=str,
- help='weights for finetuning or testing')
+ parser.add_argument(
+ '-o',
+ '--override',
+ action='append',
+ default=[],
+ help='config options to be overridden')
+ parser.add_argument(
+ '-w', '--weights', type=str, help='weights for finetuning or testing')
#detection_model_name
- parser.add_argument('--detection_model_name',
- help='the name of detection model ')
+ parser.add_argument(
+ '--detection_model_name', help='the name of detection model ')
# detection_model_weights
- parser.add_argument('--detection_model_weights',
- help='the weights path of detection model ')
+ parser.add_argument(
+ '--detection_model_weights',
+ help='the weights path of detection model ')
# params for predict
- parser.add_argument('--out-filename',
- default='ava_det_demo.mp4',
- help='output filename')
- parser.add_argument('--predict-stepsize',
- default=8,
- type=int,
- help='give out a prediction per n frames')
+ parser.add_argument(
+ '--out-filename', default='ava_det_demo.mp4', help='output filename')
+ parser.add_argument(
+ '--predict-stepsize',
+ default=8,
+ type=int,
+ help='give out a prediction per n frames')
parser.add_argument(
'--output-stepsize',
default=4,
type=int,
help=('show one frame per n frames in the demo, we should have: '
'predict_stepsize % output_stepsize == 0'))
- parser.add_argument('--output-fps',
- default=6,
- type=int,
- help='the fps of demo video output')
+ parser.add_argument(
+ '--output-fps',
+ default=6,
+ type=int,
+ help='the fps of demo video output')
return parser.parse_args()
@@ -276,7 +277,8 @@ def detection_inference(frame_paths, output_dir, model_name, weights_path):
print('Performing Human Detection for each frame')
- detection_trainer.predict(frame_paths, output_dir=output_dir, save_txt=True)
+ detection_trainer.predict(
+ frame_paths, output_dir=output_dir, save_txt=True)
print("finish object detection")
@@ -359,7 +361,8 @@ def main(args):
assert clip_len % 2 == 0, 'We would like to have an even clip_len'
frame_interval = config.PIPELINE.test.sample['frame_interval']
window_size = clip_len * frame_interval
- timestamps = np.arange(window_size // 2, (num_frame + 1 - window_size // 2),
+ timestamps = np.arange(window_size // 2,
+ (num_frame + 1 - window_size // 2),
args.predict_stepsize)
print("timetamps number:", len(timestamps))
@@ -389,10 +392,9 @@ def main(args):
detection_result_dir = 'tmp_detection'
detection_model_name = args.detection_model_name
detection_model_weights = args.detection_model_weights
- detection_txt_list = detection_inference(selected_frame_list,
- detection_result_dir,
- detection_model_name,
- detection_model_weights)
+ detection_txt_list = detection_inference(
+ selected_frame_list, detection_result_dir, detection_model_name,
+ detection_model_weights)
assert len(detection_txt_list) == len(timestamps)
print('Performing SpatioTemporal Action Detection for each clip')
@@ -411,11 +413,8 @@ def main(args):
human_detections.append(proposals)
- result = get_timestep_result(frame_dir,
- timestamp,
- clip_len,
- frame_interval,
- FPS=FPS)
+ result = get_timestep_result(
+ frame_dir, timestamp, clip_len, frame_interval, FPS=FPS)
result["proposals"] = proposals
result["scores"] = scores
@@ -435,10 +434,11 @@ def main(args):
img_shape = img_shape[np.newaxis, :]
data = [
- paddle.to_tensor(img_slow, dtype='float32'),
- paddle.to_tensor(img_fast, dtype='float32'),
- paddle.to_tensor(proposals, dtype='float32'), scores,
- paddle.to_tensor(img_shape, dtype='int32')
+ paddle.to_tensor(
+ img_slow, dtype='float32'), paddle.to_tensor(
+ img_fast, dtype='float32'), paddle.to_tensor(
+ proposals, dtype='float32'), scores, paddle.to_tensor(
+ img_shape, dtype='int32')
]
with paddle.no_grad():
@@ -458,8 +458,8 @@ def main(args):
continue
for j in range(person_num):
if result[i][j, 4] > config.MODEL.head['action_thr']:
- prediction[j].append((label_map[i + 1], result[i][j,
- 4]))
+ prediction[j].append((label_map[i + 1],
+ result[i][j, 4]))
predictions.append(prediction)
index = index + 1
@@ -474,8 +474,8 @@ def dense_timestamps(timestamps, n):
"""Make it nx frames."""
old_frame_interval = (timestamps[1] - timestamps[0])
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2
- new_frame_inds = np.arange(
- len(timestamps) * n) * old_frame_interval / n + start
+ new_frame_inds = np.arange(len(timestamps) *
+ n) * old_frame_interval / n + start
return new_frame_inds.astype(np.int)
dense_n = int(args.predict_stepsize / args.output_stepsize) #30
@@ -491,8 +491,8 @@ def dense_timestamps(timestamps, n):
except ImportError:
raise ImportError('Please install moviepy to enable output file')
- vid = mpy.ImageSequenceClip([x[:, :, ::-1] for x in vis_frames],
- fps=args.output_fps)
+ vid = mpy.ImageSequenceClip(
+ [x[:, :, ::-1] for x in vis_frames], fps=args.output_fps)
vid.write_videofile(args.out_filename)
print("finish write !")
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/export_model.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/export_model.py
old mode 100644
new mode 100755
index 92f6768d8..2ca1b4ec8
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/export_model.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/export_model.py
@@ -30,21 +30,24 @@
def parse_args():
parser = argparse.ArgumentParser("PaddleVideo export model script")
- parser.add_argument('-c',
- '--config',
- type=str,
- default='configs/example.yaml',
- help='config file path')
- parser.add_argument("-p",
- "--pretrained_params",
- default='./best.pdparams',
- type=str,
- help='params path')
- parser.add_argument("-o",
- "--output_path",
- type=str,
- default="./inference",
- help='output path')
+ parser.add_argument(
+ '-c',
+ '--config',
+ type=str,
+ default='configs/example.yaml',
+ help='config file path')
+ parser.add_argument(
+ "-p",
+ "--pretrained_params",
+ default='./best.pdparams',
+ type=str,
+ help='params path')
+ parser.add_argument(
+ "-o",
+ "--output_path",
+ type=str,
+ default="./inference",
+ help='output path')
return parser.parse_args()
@@ -66,115 +69,133 @@ def get_input_spec(cfg, model_name):
if model_name in ['ppTSM', 'TSM']:
input_spec = [[
InputSpec(
- shape=[None, cfg.num_seg, 3, cfg.target_size, cfg.target_size],
+ shape=[
+ None, cfg.num_seg, 3, cfg.target_size, cfg.target_size
+ ],
dtype='float32'),
]]
elif model_name in ['TSN', 'ppTSN']:
input_spec = [[
- InputSpec(shape=[
- None, cfg.num_seg * 10, 3, cfg.target_size, cfg.target_size
- ],
- dtype='float32'),
+ InputSpec(
+ shape=[
+ None, cfg.num_seg * 10, 3, cfg.target_size, cfg.target_size
+ ],
+ dtype='float32'),
]]
elif model_name in ['BMN']:
input_spec = [[
- InputSpec(shape=[None, cfg.feat_dim, cfg.tscale],
- dtype='float32',
- name='feat_input'),
+ InputSpec(
+ shape=[None, cfg.feat_dim, cfg.tscale],
+ dtype='float32',
+ name='feat_input'),
]]
elif model_name in ['TimeSformer', 'ppTimeSformer']:
input_spec = [[
- InputSpec(shape=[
- None, 3, cfg.num_seg * 3, cfg.target_size, cfg.target_size
- ],
- dtype='float32'),
+ InputSpec(
+ shape=[
+ None, 3, cfg.num_seg * 3, cfg.target_size, cfg.target_size
+ ],
+ dtype='float32'),
]]
elif model_name in ['VideoSwin']:
input_spec = [[
- InputSpec(shape=[
- None, 3, cfg.num_seg * cfg.seg_len * 1, cfg.target_size,
- cfg.target_size
- ],
- dtype='float32'),
+ InputSpec(
+ shape=[
+ None, 3, cfg.num_seg * cfg.seg_len * 1, cfg.target_size,
+ cfg.target_size
+ ],
+ dtype='float32'),
]]
elif model_name in ['VideoSwin_TableTennis']:
input_spec = [[
- InputSpec(shape=[
- None, 3, cfg.num_seg * cfg.seg_len * 3, cfg.target_size,
- cfg.target_size
- ],
- dtype='float32'),
+ InputSpec(
+ shape=[
+ None, 3, cfg.num_seg * cfg.seg_len * 3, cfg.target_size,
+ cfg.target_size
+ ],
+ dtype='float32'),
]]
elif model_name in ['AttentionLSTM']:
input_spec = [[
- InputSpec(shape=[None, cfg.embedding_size, cfg.feature_dims[0]],
- dtype='float32'), # for rgb_data
- InputSpec(shape=[
- None,
- ], dtype='int64'), # for rgb_len
- InputSpec(shape=[None, cfg.embedding_size, cfg.feature_dims[0]],
- dtype='float32'), # for rgb_mask
- InputSpec(shape=[None, cfg.embedding_size, cfg.feature_dims[1]],
- dtype='float32'), # for audio_data
- InputSpec(shape=[
- None,
- ], dtype='int64'), # for audio_len
- InputSpec(shape=[None, cfg.embedding_size, cfg.feature_dims[1]],
- dtype='float32'), # for audio_mask
+ InputSpec(
+ shape=[None, cfg.embedding_size, cfg.feature_dims[0]],
+ dtype='float32'), # for rgb_data
+ InputSpec(
+ shape=[None, ], dtype='int64'), # for rgb_len
+ InputSpec(
+ shape=[None, cfg.embedding_size, cfg.feature_dims[0]],
+ dtype='float32'), # for rgb_mask
+ InputSpec(
+ shape=[None, cfg.embedding_size, cfg.feature_dims[1]],
+ dtype='float32'), # for audio_data
+ InputSpec(
+ shape=[None, ], dtype='int64'), # for audio_len
+ InputSpec(
+ shape=[None, cfg.embedding_size, cfg.feature_dims[1]],
+ dtype='float32'), # for audio_mask
]]
elif model_name in ['SlowFast']:
input_spec = [[
- InputSpec(shape=[
- None, 3, cfg.num_frames // cfg.alpha, cfg.target_size,
- cfg.target_size
- ],
- dtype='float32',
- name='slow_input'),
- InputSpec(shape=[
- None, 3, cfg.num_frames, cfg.target_size, cfg.target_size
- ],
- dtype='float32',
- name='fast_input'),
+ InputSpec(
+ shape=[
+ None, 3, cfg.num_frames // cfg.alpha, cfg.target_size,
+ cfg.target_size
+ ],
+ dtype='float32',
+ name='slow_input'),
+ InputSpec(
+ shape=[
+ None, 3, cfg.num_frames, cfg.target_size, cfg.target_size
+ ],
+ dtype='float32',
+ name='fast_input'),
]]
elif model_name in ['STGCN', 'AGCN']:
input_spec = [[
- InputSpec(shape=[
- None, cfg.num_channels, cfg.window_size, cfg.vertex_nums,
- cfg.person_nums
- ],
- dtype='float32'),
+ InputSpec(
+ shape=[
+ None, cfg.num_channels, cfg.window_size, cfg.vertex_nums,
+ cfg.person_nums
+ ],
+ dtype='float32'),
]]
elif model_name in ['TransNetV2']:
input_spec = [[
- InputSpec(shape=[
- None,
- cfg.num_frames,
- cfg.height,
- cfg.width,
- cfg.num_channels,
- ],
- dtype='float32'),
+ InputSpec(
+ shape=[
+ None,
+ cfg.num_frames,
+ cfg.height,
+ cfg.width,
+ cfg.num_channels,
+ ],
+ dtype='float32'),
]]
elif model_name in ['ADDS']:
input_spec = [[
- InputSpec(shape=[None, cfg.num_channels, cfg.height, cfg.width],
- dtype='float32'),
+ InputSpec(
+ shape=[None, cfg.num_channels, cfg.height, cfg.width],
+ dtype='float32'),
]]
elif model_name in ['AVA_SlowFast_FastRcnn']:
input_spec = [[
- InputSpec(shape=[
- None, 3, cfg.num_frames // cfg.alpha, cfg.target_size,
- cfg.target_size
- ],
- dtype='float32',
- name='slow_input'),
- InputSpec(shape=[
- None, 3, cfg.num_frames, cfg.target_size, cfg.target_size
- ],
- dtype='float32',
- name='fast_input'),
- InputSpec(shape=[None, None, 4], dtype='float32', name='proposals'),
- InputSpec(shape=[None, 2], dtype='float32', name='img_shape')
+ InputSpec(
+ shape=[
+ None, 3, cfg.num_frames // cfg.alpha, cfg.target_size,
+ cfg.target_size
+ ],
+ dtype='float32',
+ name='slow_input'), InputSpec(
+ shape=[
+ None, 3, cfg.num_frames, cfg.target_size,
+ cfg.target_size
+ ],
+ dtype='float32',
+ name='fast_input'), InputSpec(
+ shape=[None, None, 4],
+ dtype='float32',
+ name='proposals'), InputSpec(
+ shape=[None, 2], dtype='float32', name='img_shape')
]]
return input_spec
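get_input_spec only builds the InputSpec list; a hedged sketch of how such a spec is typically consumed downstream via paddle.jit, with a stand-in model (TinyNet and its shapes are illustrative, not the exported PaddleVideo model):

import paddle
from paddle.static import InputSpec

class TinyNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(3 * 224 * 224, 2)

    def forward(self, x):  # x: [N, num_seg, 3, H, W]
        return self.fc(x.mean(axis=1).flatten(1))

model = TinyNet()
model.eval()
static_model = paddle.jit.to_static(
    model, input_spec=[InputSpec(shape=[None, 8, 3, 224, 224], dtype='float32')])
paddle.jit.save(static_model, './inference/tiny')  # writes .pdmodel / .pdiparams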
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/paddlevideo_clas.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/paddlevideo_clas.py
old mode 100644
new mode 100755
index 4843e62f8..86dec9c15
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/paddlevideo_clas.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/paddlevideo_clas.py
@@ -32,7 +32,6 @@
__dir__ = os.path.dirname(__file__)
sys.path.append(os.path.join(__dir__, ''))
-
import numpy as np
import tarfile
import requests
@@ -48,7 +47,7 @@
BASE_INFERENCE_MODEL_DIR = os.path.join(BASE_DIR, 'inference_model')
BASE_VIDEOS_DIR = os.path.join(BASE_DIR, 'videos')
-model_names = {'ppTSM','TSM','TSN'}
+model_names = {'ppTSM', 'TSM', 'TSN'}
def create_paddle_predictor(args):
@@ -78,6 +77,7 @@ def create_paddle_predictor(args):
return predictor
+
def download_with_progressbar(url, save_path):
response = requests.get(url, stream=True)
total_size_in_bytes = int(response.headers.get('content-length', 0))
@@ -91,6 +91,7 @@ def download_with_progressbar(url, save_path):
if total_size_in_bytes == 0 or progress_bar.n != total_size_in_bytes:
raise Exception("Something went wrong while downloading models")
+
def maybe_download(model_storage_directory, url):
# using custom model
tar_file_name_list = [
@@ -99,11 +100,11 @@ def maybe_download(model_storage_directory, url):
if not os.path.exists(
os.path.join(model_storage_directory, 'inference.pdiparams')
) or not os.path.exists(
- os.path.join(model_storage_directory, 'inference.pdmodel')):
+ os.path.join(model_storage_directory, 'inference.pdmodel')):
tmp_path = os.path.join(model_storage_directory, url.split('/')[-1])
print('download {} to {}'.format(url, tmp_path))
os.makedirs(model_storage_directory, exist_ok=True)
- download_with_progressbar(url, tmp_path) #download
+ download_with_progressbar(url, tmp_path) #download
#save to directory
with tarfile.open(tmp_path, 'r') as tarObj:
@@ -121,6 +122,7 @@ def maybe_download(model_storage_directory, url):
f.write(file.read())
os.remove(tmp_path)
+
def load_label_name_dict(path):
result = {}
if not os.path.exists(path):
@@ -137,6 +139,7 @@ def load_label_name_dict(path):
break
return result
+
def parse_args(mMain=True, add_help=True):
import argparse
@@ -147,8 +150,8 @@ def str2bool(v):
# general params
parser = argparse.ArgumentParser(add_help=add_help)
- parser.add_argument("--model_name", type=str,default='')
- parser.add_argument("-v", "--video_file", type=str,default='')
+ parser.add_argument("--model_name", type=str, default='')
+ parser.add_argument("-v", "--video_file", type=str, default='')
parser.add_argument("--use_gpu", type=str2bool, default=True)
# params for decode and sample
@@ -161,7 +164,7 @@ def str2bool(v):
parser.add_argument("--normalize", type=str2bool, default=True)
# params for predict
- parser.add_argument("--model_file", type=str,default='')
+ parser.add_argument("--model_file", type=str, default='')
parser.add_argument("--params_file", type=str)
parser.add_argument("-b", "--batch_size", type=int, default=1)
parser.add_argument("--use_fp16", type=str2bool, default=False)
@@ -170,7 +173,7 @@ def str2bool(v):
parser.add_argument("--gpu_mem", type=int, default=8000)
parser.add_argument("--top_k", type=int, default=1)
parser.add_argument("--enable_mkldnn", type=bool, default=False)
- parser.add_argument("--label_name_path",type=str,default='')
+ parser.add_argument("--label_name_path", type=str, default='')
return parser.parse_args()
@@ -195,12 +198,13 @@ def str2bool(v):
enable_mkldnn=False,
label_name_path='')
+
def get_video_list(video_file):
videos_lists = []
if video_file is None or not os.path.exists(video_file):
raise Exception("not found any video file in {}".format(video_file))
- video_end = ['mp4','avi']
+ video_end = ['mp4', 'avi']
if os.path.isfile(video_file) and video_file.split('.')[-1] in video_end:
videos_lists.append(video_file)
elif os.path.isdir(video_file):
@@ -211,12 +215,13 @@ def get_video_list(video_file):
raise Exception("not found any video file in {}".format(video_file))
return videos_lists
+
class PaddleVideo(object):
print('Inference models that Paddle provides are listed as follows:\n\n{}'.
format(model_names), '\n')
def __init__(self, **kwargs):
- process_params = parse_args(mMain=False,add_help=False)
+ process_params = parse_args(mMain=False, add_help=False)
process_params.__dict__.update(**kwargs)
if not os.path.exists(process_params.model_file):
@@ -224,7 +229,8 @@ def __init__(self, **kwargs):
raise Exception(
'Please input model name that you want to use!')
if process_params.model_name in model_names:
- url = 'https://videotag.bj.bcebos.com/PaddleVideo/InferenceModel/{}_infer.tar'.format(process_params.model_name)
+ url = 'https://videotag.bj.bcebos.com/PaddleVideo/InferenceModel/{}_infer.tar'.format(
+ process_params.model_name)
if not os.path.exists(
os.path.join(BASE_INFERENCE_MODEL_DIR,
process_params.model_name)):
@@ -254,7 +260,7 @@ def __init__(self, **kwargs):
self.args = process_params
self.predictor = create_paddle_predictor(process_params)
- def predict(self,video):
+ def predict(self, video):
"""
predict label of video with paddlevideo_clas
Args:
@@ -291,13 +297,13 @@ def predict(self,video):
total_result = []
for filename in video_list:
if isinstance(filename, str):
- v = utils.decode(filename, self.args)
+ v = utils.decode(filename, self.args)
assert v is not None, "Error in loading video: {}".format(
filename)
inputs = utils.preprocess(v, self.args)
inputs = np.expand_dims(
inputs, axis=0).repeat(
- 1, axis=0).copy()
+ 1, axis=0).copy()
else:
inputs = filename
@@ -311,7 +317,8 @@ def predict(self,video):
if len(self.label_name_dict) != 0:
label_names = [self.label_name_dict[c] for c in classes]
result = {
- "videoname": filename if isinstance(filename, str) else 'video',
+ "videoname": filename
+ if isinstance(filename, str) else 'video',
"class_ids": classes.tolist(),
"scores": scores.tolist(),
"label_names": label_names,
@@ -319,6 +326,7 @@ def predict(self,video):
total_result.append(result)
return total_result
+
def main():
# for cmd
args = parse_args(mMain=True)
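A hedged usage sketch of the whl interface defined in this file, mirroring the ppvideo console_scripts entry point from setup.py (the clip path is illustrative):

from ppvideo import PaddleVideo  # import path assumed from setup.py (packages=['ppvideo'])

clas = PaddleVideo(model_name='ppTSM', use_gpu=False)  # fetches the inference model on first use
results = clas.predict('data/example.avi')
print(results[0]['videoname'], results[0]['class_ids'], results[0]['scores'])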
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/predict.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/predict.py
old mode 100644
new mode 100755
index 9c317c20b..6514861bf
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/predict.py
@@ -29,11 +29,12 @@ def str2bool(v):
# general params
parser = argparse.ArgumentParser("PaddleVideo Inference model script")
- parser.add_argument('-c',
- '--config',
- type=str,
- default='configs/example.yaml',
- help='config file path')
+ parser.add_argument(
+ '-c',
+ '--config',
+ type=str,
+ default='configs/example.yaml',
+ help='config file path')
parser.add_argument("-i", "--input_file", type=str, help="input file path")
parser.add_argument("--model_file", type=str)
parser.add_argument("--params_file", type=str)
@@ -98,8 +99,8 @@ def create_paddle_predictor(args, cfg):
elif 'videoswin' in cfg.model_name.lower():
num_views = 3 # UniformCrop
max_batch_size = args.batch_size * num_views * num_seg * seg_len
- config.enable_tensorrt_engine(precision_mode=precision,
- max_batch_size=max_batch_size)
+ config.enable_tensorrt_engine(
+ precision_mode=precision, max_batch_size=max_batch_size)
config.enable_memory_optim()
# use zero copy
@@ -115,9 +116,7 @@ def create_paddle_predictor(args, cfg):
def parse_file_paths(input_path: str) -> list:
if osp.isfile(input_path):
- files = [
- input_path,
- ]
+ files = [input_path, ]
else:
files = os.listdir(input_path)
files = [
@@ -198,21 +197,20 @@ def main():
# instantiate auto log
import auto_log
pid = os.getpid()
- autolog = auto_log.AutoLogger(model_name=cfg.model_name,
- model_precision=args.precision,
- batch_size=args.batch_size,
- data_shape="dynamic",
- save_path="./output/auto_log.lpg",
- inference_config=inference_config,
- pids=pid,
- process_name=None,
- gpu_ids=0 if args.use_gpu else None,
- time_keys=[
- 'preprocess_time',
- 'inference_time',
- 'postprocess_time'
- ],
- warmup=num_warmup)
+ autolog = auto_log.AutoLogger(
+ model_name=cfg.model_name,
+ model_precision=args.precision,
+ batch_size=args.batch_size,
+ data_shape="dynamic",
+ save_path="./output/auto_log.lpg",
+ inference_config=inference_config,
+ pids=pid,
+ process_name=None,
+ gpu_ids=0 if args.use_gpu else None,
+ time_keys=[
+ 'preprocess_time', 'inference_time', 'postprocess_time'
+ ],
+ warmup=num_warmup)
files = [
args.input_file for _ in range(test_video_num + num_warmup)
]
@@ -227,8 +225,8 @@ def main():
autolog.times.start()
# Pre process batched input
- batched_inputs = InferenceHelper.preprocess_batch(
- files[st_idx:ed_idx])
+ batched_inputs = InferenceHelper.preprocess_batch(files[st_idx:
+ ed_idx])
# get pre process time cost
if args.enable_benchmark:
@@ -256,7 +254,7 @@ def main():
# time.sleep(0.01) # sleep for T4 GPU
- # report benchmark log if enabled
+ # report benchmark log if enabled
if args.enable_benchmark:
autolog.report()
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/summary.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/summary.py
old mode 100644
new mode 100755
index f7f98e0f9..c46e2c932
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/summary.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/summary.py
@@ -32,17 +32,17 @@
def parse_args():
parser = argparse.ArgumentParser("PaddleVideo Summary")
- parser.add_argument('-c',
- '--config',
- type=str,
- default='configs/example.yaml',
- help='config file path')
+ parser.add_argument(
+ '-c',
+ '--config',
+ type=str,
+ default='configs/example.yaml',
+ help='config file path')
parser.add_argument("--img_size", type=int, default=224)
parser.add_argument("--num_seg", type=int, default=8)
- parser.add_argument("--FLOPs",
- action="store_true",
- help="whether to print FLOPs")
+ parser.add_argument(
+ "--FLOPs", action="store_true", help="whether to print FLOPs")
return parser.parse_args()
@@ -73,7 +73,8 @@ def main():
print(params_info)
if args.FLOPs:
- flops_info = paddleslim.analysis.flops(model, [1, 1, num_seg, 3, img_size, img_size])
+ flops_info = paddleslim.analysis.flops(
+ model, [1, 1, num_seg, 3, img_size, img_size])
print(flops_info)
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/utils.py b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/utils.py
old mode 100644
new mode 100755
index bcff61137..9a38db0fb
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/utils.py
@@ -145,9 +145,7 @@ def postprocess(self, output, print_output=True):
output: list
"""
if not isinstance(self.input_file, list):
- self.input_file = [
- self.input_file,
- ]
+ self.input_file = [self.input_file, ]
output = output[0] # [B, num_cls]
N = len(self.input_file)
if output.shape[0] != N:
@@ -191,11 +189,9 @@ def preprocess(self, input_file):
img_mean = [0.485, 0.456, 0.406]
img_std = [0.229, 0.224, 0.225]
ops = [
- VideoDecoder(),
- Sampler(self.num_seg, self.seg_len, valid_mode=True),
- Scale(self.short_size),
- CenterCrop(self.target_size),
- Image2Array(),
+ VideoDecoder(), Sampler(
+ self.num_seg, self.seg_len, valid_mode=True),
+ Scale(self.short_size), CenterCrop(self.target_size), Image2Array(),
Normalization(img_mean, img_std)
]
for op in ops:
@@ -230,17 +226,13 @@ def preprocess(self, input_file):
img_mean = [0.485, 0.456, 0.406]
img_std = [0.229, 0.224, 0.225]
ops = [
- VideoDecoder(),
- Sampler(self.num_seg,
- self.seg_len,
- valid_mode=True,
- select_left=True),
- Scale(self.short_size,
- fixed_ratio=True,
- do_round=True,
- backend='cv2'),
- TenCrop(self.target_size),
- Image2Array(),
+ VideoDecoder(), Sampler(
+ self.num_seg, self.seg_len, valid_mode=True,
+ select_left=True), Scale(
+ self.short_size,
+ fixed_ratio=True,
+ do_round=True,
+ backend='cv2'), TenCrop(self.target_size), Image2Array(),
Normalization(img_mean, img_std)
]
for op in ops:
@@ -363,12 +355,13 @@ def preprocess(self, input_file):
input_file)
results = {'filename': input_file}
ops = [
- VideoDecoder(backend='pyav', mode='test', num_seg=self.num_seg),
- Sampler(self.num_seg,
+ VideoDecoder(
+ backend='pyav', mode='test', num_seg=self.num_seg), Sampler(
+ self.num_seg,
self.seg_len,
valid_mode=True,
- linspace_sample=True),
- Normalization(self.mean, self.std, tensor_shape=[1, 1, 1, 3]),
+ linspace_sample=True), Normalization(
+ self.mean, self.std, tensor_shape=[1, 1, 1, 3]),
Image2Array(data_format='cthw'),
JitterScale(self.short_size, self.short_size),
UniformCrop(self.target_size)
@@ -412,23 +405,23 @@ def preprocess(self, input_file):
input_file)
results = {'filename': input_file}
ops = [
- VideoDecoder(backend='decord', mode='valid'),
- Sampler(num_seg=self.num_seg,
+ VideoDecoder(
+ backend='decord', mode='valid'), Sampler(
+ num_seg=self.num_seg,
frame_interval=self.frame_interval,
seg_len=self.seg_len,
valid_mode=True,
- use_pil=False),
- Scale(short_size=self.short_size,
- fixed_ratio=False,
- keep_ratio=True,
- backend='cv2',
- do_round=True),
- CenterCrop(target_size=224, backend='cv2'),
- Normalization(mean=self.mean,
- std=self.std,
- tensor_shape=[3, 1, 1, 1],
- inplace=True),
- Image2Array(data_format='cthw')
+ use_pil=False), Scale(
+ short_size=self.short_size,
+ fixed_ratio=False,
+ keep_ratio=True,
+ backend='cv2',
+ do_round=True), CenterCrop(
+ target_size=224, backend='cv2'), Normalization(
+ mean=self.mean,
+ std=self.std,
+ tensor_shape=[3, 1, 1, 1],
+ inplace=True), Image2Array(data_format='cthw')
]
for op in ops:
results = op(results)
@@ -441,9 +434,7 @@ def postprocess(self, output, print_output=True):
output: list
"""
if not isinstance(self.input_file, list):
- self.input_file = [
- self.input_file,
- ]
+ self.input_file = [self.input_file, ]
output = output[0] # [B, num_cls]
N = len(self.input_file)
if output.shape[0] != N:
@@ -486,22 +477,22 @@ def preprocess(self, input_file):
img_mean = [123.675, 116.28, 103.53]
img_std = [58.395, 57.12, 57.375]
ops = [
- FrameDecoder(),
- SamplerPkl(num_seg=self.num_seg,
- seg_len=self.seg_len,
- backend='cv2',
- valid_mode=True),
- Scale(short_size=self.short_size,
- fixed_ratio=False,
- keep_ratio=True,
- backend='cv2',
- do_round=True),
- UniformCrop(target_size=self.target_size, backend='cv2'),
- Normalization(mean=img_mean,
- std=img_std,
- tensor_shape=[3, 1, 1, 1],
- inplace=True),
- Image2Array(data_format='cthw')
+ FrameDecoder(), SamplerPkl(
+ num_seg=self.num_seg,
+ seg_len=self.seg_len,
+ backend='cv2',
+ valid_mode=True), Scale(
+ short_size=self.short_size,
+ fixed_ratio=False,
+ keep_ratio=True,
+ backend='cv2',
+ do_round=True), UniformCrop(
+ target_size=self.target_size, backend='cv2'),
+ Normalization(
+ mean=img_mean,
+ std=img_std,
+ tensor_shape=[3, 1, 1, 1],
+ inplace=True), Image2Array(data_format='cthw')
]
for op in ops:
results = op(results)
@@ -536,15 +527,16 @@ def add_text_to_video(
frame_height = int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_len = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
- print("fps=", int(fps), "frames=", int(frames_len), "scale=",
- f"{frame_height}x{frame_width}")
+ print("fps=",
+ int(fps), "frames=",
+ int(frames_len), "scale=", f"{frame_height}x{frame_width}")
frames_rgb_list = []
for i in range(int(frames_len)):
if video_path.endswith('.pkl'):
frame = np.array(
- Image.open(BytesIO(frames[i])).convert("RGB").resize(
- (240, 135)))[:, :, ::-1].astype('uint8')
+ Image.open(BytesIO(frames[i])).convert("RGB").resize((
+ 240, 135)))[:, :, ::-1].astype('uint8')
else:
_, frame = videoCapture.read()
frame = cv2.putText(frame, text, (30, 30), cv2.FONT_HERSHEY_COMPLEX,
@@ -555,19 +547,18 @@ def add_text_to_video(
cv2.destroyAllWindows()
output_filename = os.path.basename(video_path)
output_filename = output_filename.split('.')[0] + '.gif'
- imageio.mimsave(f'{output_dir}/{output_filename}',
- frames_rgb_list,
- 'GIF',
- duration=0.00085)
+ imageio.mimsave(
+ f'{output_dir}/{output_filename}',
+ frames_rgb_list,
+ 'GIF',
+ duration=0.00085)
def postprocess(self, output, print_output=True, save_gif=True):
"""
output: list
"""
if not isinstance(self.input_file, list):
- self.input_file = [
- self.input_file,
- ]
+ self.input_file = [self.input_file, ]
output = output[0] # [B, num_cls]
N = len(self.input_file)
if output.shape[0] != N:
@@ -620,11 +611,13 @@ def preprocess(self, input_file):
img_mean = [0.45, 0.45, 0.45]
img_std = [0.225, 0.225, 0.225]
ops = [
- DecodeSampler(self.num_frames, self.sampling_rate, test_mode=True),
+ DecodeSampler(
+ self.num_frames, self.sampling_rate, test_mode=True),
JitterScale(self.target_size, self.target_size),
MultiCrop(self.target_size),
Image2Array(transpose=False),
- Normalization(img_mean, img_std, tensor_shape=[1, 1, 1, 3]),
+ Normalization(
+ img_mean, img_std, tensor_shape=[1, 1, 1, 3]),
PackOutput(self.alpha),
]
for op in ops:
@@ -640,9 +633,7 @@ def postprocess(self, output, print_output=True):
output: list
"""
if not isinstance(self.input_file, list):
- self.input_file = [
- self.input_file,
- ]
+ self.input_file = [self.input_file, ]
output = output[0] # [B, num_cls]
N = len(self.input_file)
@@ -725,11 +716,14 @@ def preprocess(self, input_file):
res = []
for modality in ['rgb', 'audio']:
res.append(
- np.expand_dims(results[f'{modality}_data'], axis=0).copy())
+ np.expand_dims(
+ results[f'{modality}_data'], axis=0).copy())
res.append(
- np.expand_dims(results[f'{modality}_len'], axis=0).copy())
+ np.expand_dims(
+ results[f'{modality}_len'], axis=0).copy())
res.append(
- np.expand_dims(results[f'{modality}_mask'], axis=0).copy())
+ np.expand_dims(
+ results[f'{modality}_mask'], axis=0).copy())
return res
@@ -753,14 +747,14 @@ def input_iterator(self, frames):
# return windows of size 100 where the first/last 25 frames are from the previous/next batch
# the first and last window must be padded by copies of the first and last frame of the video
no_padded_frames_start = 25
- no_padded_frames_end = 25 + 50 - (
- len(frames) % 50 if len(frames) % 50 != 0 else 50) # 25 - 74
+ no_padded_frames_end = 25 + 50 - (len(frames) % 50 if len(frames) % 50
+ != 0 else 50) # 25 - 74
start_frame = np.expand_dims(frames[0], 0)
end_frame = np.expand_dims(frames[-1], 0)
- padded_inputs = np.concatenate([start_frame] * no_padded_frames_start +
- [frames] +
- [end_frame] * no_padded_frames_end, 0)
+ padded_inputs = np.concatenate(
+ [start_frame] * no_padded_frames_start + [frames] + [end_frame] *
+ no_padded_frames_end, 0)
ptr = 0
while ptr + 100 <= len(padded_inputs):
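The loop body falls outside this hunk; it emits windows of 100 frames every 50. To make the padding arithmetic above concrete, a small standalone check (toy frame count assumed):
```python
import numpy as np

frames = np.zeros([130, 27, 48, 3], np.uint8)  # toy clip of 130 frames
pad_start = 25
pad_end = 25 + 50 - (len(frames) % 50 if len(frames) % 50 != 0 else 50)  # 45 here
padded = np.concatenate(
    [frames[:1]] * pad_start + [frames] + [frames[-1:]] * pad_end, 0)
# 25 + 130 + 45 = 200 frames -> windows of 100 start at 0, 50, 100
print(len(padded), [p for p in range(0, len(padded) - 99, 50)])
```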
@@ -779,12 +773,9 @@ def preprocess(self, input_file):
input_file)
self.input_file = input_file
self.filename = os.path.splitext(os.path.split(self.input_file)[1])[0]
- video_stream, err = ffmpeg.input(
- self.input_file).output("pipe:",
- format="rawvideo",
- pix_fmt="rgb24",
- s="48x27").run(capture_stdout=True,
- capture_stderr=True)
+ video_stream, err = ffmpeg.input(self.input_file).output(
+ "pipe:", format="rawvideo", pix_fmt="rgb24",
+ s="48x27").run(capture_stdout=True, capture_stderr=True)
self.frames = np.frombuffer(video_stream,
np.uint8).reshape([-1, 27, 48, 3])
self.len_frames = len(self.frames)
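Each raw frame here is 27 x 48 x 3 = 3888 bytes of RGB, which is exactly what the `frombuffer` reshape relies on; an illustrative sanity check one might add:
```python
assert len(video_stream) % (27 * 48 * 3) == 0  # stream must contain whole frames
```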
@@ -830,9 +821,11 @@ def visualize_predictions(self, frames, predictions):
height = len(frames) // width
img = frames.reshape([height, width, ih + 1, iw + len(predictions), ic])
- img = np.concatenate(np.split(
- np.concatenate(np.split(img, height), axis=2)[0], width),
- axis=2)[0, :-1]
+ img = np.concatenate(
+ np.split(
+ np.concatenate(
+ np.split(img, height), axis=2)[0], width),
+ axis=2)[0, :-1]
img = Image.fromarray(img)
draw = ImageDraw.Draw(img)
@@ -849,9 +842,10 @@ def visualize_predictions(self, frames, predictions):
value = round(p * (ih - 1))
if value != 0:
- draw.line((x + j, y, x + j, y - value),
- fill=tuple(color),
- width=1)
+ draw.line(
+ (x + j, y, x + j, y - value),
+ fill=tuple(color),
+ width=1)
return img
def postprocess(self, outputs, print_output=True):
@@ -870,11 +864,9 @@ def postprocess(self, outputs, print_output=True):
all_frames_pred = np.concatenate(
[all_ for single_, all_ in predictions])
single_frame_predictions, all_frame_predictions = single_frame_pred[:
- self
- .
+ self.
len_frames], all_frames_pred[:
- self
- .
+ self.
len_frames]
scenes = self.predictions_to_scenes(single_frame_predictions)
@@ -910,12 +902,10 @@ class ADDS_Inference_helper(Base_Inference_helper):
def __init__(self,
frame_idxs=[0],
num_scales=4,
- side_map={
- "2": 2,
- "3": 3,
- "l": 2,
- "r": 3
- },
+ side_map={"2": 2,
+ "3": 3,
+ "l": 2,
+ "r": 3},
height=256,
width=512,
full_res_shape=None,
@@ -952,15 +942,13 @@ def preprocess(self, input_file):
num_scales=self.num_scales,
side_map=self.side_map,
full_res_shape=self.full_res_shape,
- img_ext=self.img_ext,
- ),
+ img_ext=self.img_ext, ),
GroupResize(
height=self.height,
width=self.width,
K=self.K,
scale=1,
- mode='infer',
- ),
+ mode='infer', ),
ToArray(),
]
for op in ops:
@@ -974,9 +962,7 @@ def postprocess(self, output, print_output, save_dir='data/'):
output: list
"""
if not isinstance(self.input_file, list):
- self.input_file = [
- self.input_file,
- ]
+ self.input_file = [self.input_file, ]
print(len(output))
N = len(self.input_file)
for i in range(N):
@@ -1018,8 +1004,8 @@ def __init__(self,
self.detection_model_name = detection_model_name
self.detection_model_weights = detection_model_weights
- self.config = get_config(config_file_path,
- show=False) #parse config file
+ self.config = get_config(
+ config_file_path, show=False) #parse config file
self.predict_stepsize = predict_stepsize
self.output_stepsize = output_stepsize
self.output_fps = output_fps
@@ -1077,10 +1063,9 @@ def preprocess(self, input_file):
detection_result_dir = 'tmp_detection'
detection_model_name = self.detection_model_name
detection_model_weights = self.detection_model_weights
- detection_txt_list = detection_inference(selected_frame_list,
- detection_result_dir,
- detection_model_name,
- detection_model_weights)
+ detection_txt_list = detection_inference(
+ selected_frame_list, detection_result_dir, detection_model_name,
+ detection_model_weights)
assert len(detection_txt_list) == len(timestamps)
human_detections = []
@@ -1100,11 +1085,8 @@ def preprocess(self, input_file):
human_detections.append(proposals)
- result = get_timestep_result(frame_dir,
- timestamp,
- clip_len,
- frame_interval,
- FPS=FPS)
+ result = get_timestep_result(
+ frame_dir, timestamp, clip_len, frame_interval, FPS=FPS)
result["proposals"] = proposals
result["scores"] = scores
@@ -1124,10 +1106,11 @@ def preprocess(self, input_file):
img_shape = img_shape[np.newaxis, :]
data = [
- paddle.to_tensor(img_slow, dtype='float32'),
- paddle.to_tensor(img_fast, dtype='float32'),
- paddle.to_tensor(proposals, dtype='float32'),
- paddle.to_tensor(img_shape, dtype='int32')
+ paddle.to_tensor(
+ img_slow, dtype='float32'), paddle.to_tensor(
+ img_fast, dtype='float32'), paddle.to_tensor(
+ proposals, dtype='float32'), paddle.to_tensor(
+ img_shape, dtype='int32')
]
person_num = proposals.shape[1]
@@ -1199,8 +1182,8 @@ def dense_timestamps(timestamps, n):
"""Make it nx frames."""
old_frame_interval = (timestamps[1] - timestamps[0])
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2
- new_frame_inds = np.arange(
- len(timestamps) * n) * old_frame_interval / n + start
+ new_frame_inds = np.arange(len(timestamps) *
+ n) * old_frame_interval / n + start
            return new_frame_inds.astype(int)  # np.int was removed in newer NumPy
dense_n = int(self.predict_stepsize / self.output_stepsize) #30
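A worked example of `dense_timestamps` with illustrative numbers:
```python
import numpy as np

timestamps, n = np.array([30, 60]), 2
old_frame_interval = timestamps[1] - timestamps[0]            # 30
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2  # 22.5
dense = np.arange(len(timestamps) * n) * old_frame_interval / n + start
print(dense.astype(int))  # [22 37 52 67]
```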
@@ -1216,8 +1199,8 @@ def dense_timestamps(timestamps, n):
except ImportError:
raise ImportError('Please install moviepy to enable output file')
- vid = mpy.ImageSequenceClip([x[:, :, ::-1] for x in vis_frames],
- fps=self.output_fps)
+ vid = mpy.ImageSequenceClip(
+ [x[:, :, ::-1] for x in vis_frames], fps=self.output_fps)
vid.write_videofile(self.out_filename)
print("finish write !")
diff --git a/Paddle_Industry_Practice_Sample_Library/Football_Action/README.md b/Paddle_Industry_Practice_Sample_Library/Football_Action/README.md
old mode 100644
new mode 100755
index a928841c0..b13dc1b05
--- a/Paddle_Industry_Practice_Sample_Library/Football_Action/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/Football_Action/README.md
@@ -114,7 +114,7 @@ sh download_dataset.sh
The input video is an mp4 file; the sample video we provide, football.mp4, runs 1h43min. Training on the full video would consume a lot of compute, so some sampling is usually done beforehand.
-Image sampling: extract frames at a rate of fps=5
+Image sampling: extract frames at a rate of fps=5
Audio sampling: pcm audio file, sampling rate ar=16000
Run the following code to perform the image and audio sampling.
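The sampling code itself sits outside this hunk; purely as an illustration, a hypothetical ffmpeg-python sketch matching the parameters above (file paths and output directories assumed):
```python
import ffmpeg

# image sampling: extract frames at fps=5 (output directory assumed to exist)
ffmpeg.input('datasets/football.mp4').output('frames/%08d.jpg', r=5).run()
# audio sampling: raw 16 kHz mono pcm
ffmpeg.input('datasets/football.mp4').output(
    'pcm/football.pcm', format='s16le', acodec='pcm_s16le', ar=16000, ac=1).run()
```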
diff --git a/Paddle_Industry_Practice_Sample_Library/HumanSeg/README.md b/Paddle_Industry_Practice_Sample_Library/HumanSeg/README.md
old mode 100644
new mode 100755
index 628d942fc..22c6b5a64
--- a/Paddle_Industry_Practice_Sample_Library/HumanSeg/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/HumanSeg/README.md
@@ -9,14 +9,14 @@
-**If you find this case helpful, please Star the repo so it is easy to find again; link here:**
+**If you find this case helpful, please Star the repo so it is easy to find again; link here:**
[https://github.com/PaddlePaddle/awesome-DeepLearning](https://github.com/PaddlePaddle/awesome-DeepLearning)
# 1. Solution design
In this tutorial, an image or video uploaded by the user is taken as input, and a portrait-segmentation model trained on the Supervisely Persons dataset (or a pre-prepared Inference Model) is used to run portrait segmentation, returning the result to the user in real time.
-# 2. Environment setup and preparation
+# 2. Environment setup and preparation
1. Install PaddlePaddle
@@ -340,4 +340,4 @@ The Portrait model is suited to wide-screen shots; portrait-orientation results are slightly worse.
**Data source**
-The dataset for this case comes from: https://supervise.ly/
\ No newline at end of file
+The dataset for this case comes from: https://supervise.ly/
diff --git a/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/1.gif b/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/1.gif
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/2.png b/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/2.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/3.png b/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/3.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/4.jpg b/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/4.jpg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/5.gif b/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/5.gif
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/6.gif b/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/6.gif
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/7.gif b/Paddle_Industry_Practice_Sample_Library/HumanSeg/imgs/7.gif
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Lip-syncing/Lip-syncing.md b/Paddle_Industry_Practice_Sample_Library/Lip-syncing/Lip-syncing.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Lip-syncing/data/audio.m4a b/Paddle_Industry_Practice_Sample_Library/Lip-syncing/data/audio.m4a
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Lip-syncing/data/picture.jpeg b/Paddle_Industry_Practice_Sample_Library/Lip-syncing/data/picture.jpeg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Motion_Driving/Motion_Driving.md b/Paddle_Industry_Practice_Sample_Library/Motion_Driving/Motion_Driving.md
old mode 100644
new mode 100755
index f99d14289..81889efbb
--- a/Paddle_Industry_Practice_Sample_Library/Motion_Driving/Motion_Driving.md
+++ b/Paddle_Industry_Practice_Sample_Library/Motion_Driving/Motion_Driving.md
@@ -76,7 +76,7 @@ pip install imageio-ffmpeg
### Expression and motion transfer
-Run the following command to perform expression and motion transfer. The parameters are described in detail below:
+Run the following command to perform expression and motion transfer. The parameters are described in detail below:
- driving_video: the driving video; the expressions and motions of the person in this video are what gets transferred. In this project the driving video path is "data/driving_video.MOV"; you can upload a video of your own and change the path passed to the `driving_video` parameter;
- source_image: the source image; the expressions and motions from the video are transferred onto the person in this image. Here the source image path is "data/image.jpeg"; you can use your own image and change the path passed to the `source_image` parameter;
- relative: whether the program uses relative or absolute coordinates for the person keypoints in the video and image. Relative coordinates are recommended; absolute coordinates tend to distort the transferred subject;
diff --git a/Paddle_Industry_Practice_Sample_Library/Motion_Driving/data/driving_video.MOV b/Paddle_Industry_Practice_Sample_Library/Motion_Driving/data/driving_video.MOV
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Motion_Driving/data/image.jpeg b/Paddle_Industry_Practice_Sample_Library/Motion_Driving/data/image.jpeg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Motion_Driving/data/music.mov b/Paddle_Industry_Practice_Sample_Library/Motion_Driving/data/music.mov
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/README.md b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/README.md
old mode 100644
new mode 100755
index 13e4c32c4..5b175c142
--- a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/README.md
@@ -143,15 +143,15 @@ python -m paddle.distributed.launch --log_dir=./mcfairmot_dla34_30e_1088x608_vis
## 6 Model evaluation
-In this scenario we want vehicle IDs in the video to stay correct most of the time, so that targeted control measures can be applied; we therefore use IDF1 as the evaluation metric. IDF1 measures the tracking consistency over the whole video, i.e. the probability that targets are identified correctly overall.
+In this scenario we want vehicle IDs in the video to stay correct most of the time, so that targeted control measures can be applied; we therefore use IDF1 as the evaluation metric. IDF1 measures the tracking consistency over the whole video, i.e. the probability that targets are identified correctly overall.
-$$ IDF1 = \frac{2IDTP}{2IDTP+IDFP+IDFN} $$
+$$ IDF1 = \frac{2IDTP}{2IDTP+IDFP+IDFN} $$
where
-$$IDFN=\sum_{\tau \in AT}{\sum_{t\in T_{\tau}}{m(\tau , \lambda _m(\tau), t, \Delta)}}$$
+$$IDFN=\sum_{\tau \in AT}{\sum_{t\in T_{\tau}}{m(\tau , \lambda _m(\tau), t, \Delta)}}$$
-$$IDFP=\sum_{\Delta \in AC}{\sum_{t\in T_{\Delta}}{m(\tau _m(\lambda), \lambda , t, \Delta)}}$$
+$$IDFP=\sum_{\Delta \in AC}{\sum_{t\in T_{\Delta}}{m(\tau _m(\lambda), \lambda , t, \Delta)}}$$
$$IDTP=\sum_{\tau \in AT}{len(\tau)-IDFN}$$
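Once the ID-level true positives, false positives, and false negatives have been accumulated, the headline metric reduces to a one-line ratio; a minimal sketch with hypothetical counts (real evaluation also needs the full ID-assignment step, e.g. via the motmetrics library):
```python
def idf1(idtp: int, idfp: int, idfn: int) -> float:
    # IDF1 = 2*IDTP / (2*IDTP + IDFP + IDFN)
    return 2 * idtp / (2 * idtp + idfp + idfn)

print(idf1(idtp=800, idfp=150, idfn=250))  # 0.8
```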
@@ -305,13 +305,13 @@ mot_jde_infer.predict_naive(model_dir, video_file, image_dir, device, threshold,
* Device 1:
```
CPU:80 Intel(R) Xeon(R) Gold 6271C CPU @ 2.60GHz
-GPU:NVIDIA Tesla V100
+GPU:NVIDIA Tesla V100
```
-* Device 2:
+* Device 2:
```
CPU:96 Intel(R) Xeon(R) Gold 6271C CPU @ 2.60GHz
-GPU:NVIDIA TESLA T4
+GPU:NVIDIA TESLA T4
```
@@ -461,7 +461,7 @@ CenterNetPostProcess:
In this case, with HardNet as the backbone, we ran an optimization experiment using GIoU Loss; the results are as follows:
| Model | IDF1 |
-| -------- | -------- |
+| -------- | -------- |
| MCFairMOT+HardNet+ByteTracker| 57.2|
| MCFairMOT+HardNet+GIoU Loss+ByteTracker| 57.6|
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/fairmot_hardnet85_30e_1088x608.yml b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/fairmot_hardnet85_30e_1088x608.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_dla34_30e_1088x608_visdrone_vehicle.yml b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_dla34_30e_1088x608_visdrone_vehicle.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_dla34_30e_1088x608_visdrone_vehicle_coco.yml b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_dla34_30e_1088x608_visdrone_vehicle_coco.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_dla34_30e_1088x608_visdrone_vehicle_coco_adam.yml b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_dla34_30e_1088x608_visdrone_vehicle_coco_adam.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_hardnet_30e_1088x608_visdrone_vehicle.yml b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_hardnet_30e_1088x608_visdrone_vehicle.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_hardnet_30e_1088x608_visdrone_vehicle_giou.yml b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_hardnet_30e_1088x608_visdrone_vehicle_giou.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle.yml b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/config/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/images/VisDrone.jpg b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/images/VisDrone.jpg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/images/baseline.gif b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/images/baseline.gif
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/images/demo.gif b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/images/demo.gif
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/mcmot_metrics.py b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/mcmot_metrics.py
old mode 100644
new mode 100755
index 1366edd58..4dda01534
--- a/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/mcmot_metrics.py
+++ b/Paddle_Industry_Practice_Sample_Library/Multi-Class_Vehicle_Tracking/mcmot_metrics.py
@@ -450,7 +450,7 @@ def accumulate(self):
self.cls_summary_list = []
for row in range(self.num_classes):
            # Skip classes that are not one of the 4 vehicle-related categories
- if self.num_classes == 10 and row not in [3,4,5,8]:
+ if self.num_classes == 10 and row not in [3, 4, 5, 8]:
continue
seqs_cls_df = pd.concat(self.seqs_overall[row])
seqs_cls_summary = seqs_overall_metrics(seqs_cls_df)
diff --git a/Paddle_Industry_Practice_Sample_Library/MultimodalVideoTag/README.md b/Paddle_Industry_Practice_Sample_Library/MultimodalVideoTag/README.md
old mode 100644
new mode 100755
index 92bbd0102..df28c92fb
--- a/Paddle_Industry_Practice_Sample_Library/MultimodalVideoTag/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/MultimodalVideoTag/README.md
@@ -101,16 +101,16 @@ print('label: ', record['label'])
!head -n 10 datasets/val.txt
```
- ffce63f737137cab7b50126c10f636e3.mp4 小俊00001我的世界如果一个女的很喜欢你但你拒绝了会怎么样 游戏-沙盒
- caf17db009b7dcc77b4716384efb320e.mp4 和平精英这游戏玩的太难了 游戏-射击
- e977bd4ddd022624b74e6a0d1f366aad.mp4 女神 拍人-美女
- a2c66030625494b7aa45648e4c445805.mp4 过年啦 美食-美食展示
- f7fe09e2734eb5ad84595a821d0db6b8.mp4 新神第五人格先知我活得像个魔术师 游戏-角色扮演
- d57b2a72267aee3d83df645663355a5b.mp4 你想去哪都可以有个条件就是你是我的 游戏-MOBA
- a569d6e829baa41129ea8c78baa92601.mp4 皇室战争里程碑的纪念莽近200 游戏-策略游戏
- 533b586f5426425f979ccabc68cfda77.mp4 这都是谁呢 拍人-萌娃
- a96197ffe31a810012eb83881ab3856f.mp4 比起扎头发女生披肩散发更让男生心动 拍人-美女
- 2cd7fe0e846ac246eaad54cfaaaf2715.mp4 和平精英和平精英搞笑视频太难了 游戏-射击
+ ffce63f737137cab7b50126c10f636e3.mp4 小俊00001我的世界如果一个女的很喜欢你但你拒绝了会怎么样 游戏-沙盒
+ caf17db009b7dcc77b4716384efb320e.mp4 和平精英这游戏玩的太难了 游戏-射击
+ e977bd4ddd022624b74e6a0d1f366aad.mp4 女神 拍人-美女
+ a2c66030625494b7aa45648e4c445805.mp4 过年啦 美食-美食展示
+ f7fe09e2734eb5ad84595a821d0db6b8.mp4 新神第五人格先知我活得像个魔术师 游戏-角色扮演
+ d57b2a72267aee3d83df645663355a5b.mp4 你想去哪都可以有个条件就是你是我的 游戏-MOBA
+ a569d6e829baa41129ea8c78baa92601.mp4 皇室战争里程碑的纪念莽近200 游戏-策略游戏
+ 533b586f5426425f979ccabc68cfda77.mp4 这都是谁呢 拍人-萌娃
+ a96197ffe31a810012eb83881ab3856f.mp4 比起扎头发女生披肩散发更让男生心动 拍人-美女
+ 2cd7fe0e846ac246eaad54cfaaaf2715.mp4 和平精英和平精英搞笑视频太难了 游戏-射击
```python
# Label file
diff --git a/Paddle_Industry_Practice_Sample_Library/PCB_Fault_Detection/PCB_faster_rcnn_r50_fpn_3x_coco.yml b/Paddle_Industry_Practice_Sample_Library/PCB_Fault_Detection/PCB_faster_rcnn_r50_fpn_3x_coco.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/PCB_Fault_Detection/dataset_analysis.py b/Paddle_Industry_Practice_Sample_Library/PCB_Fault_Detection/dataset_analysis.py
old mode 100644
new mode 100755
index 1ca36f979..3cbd7294d
--- a/Paddle_Industry_Practice_Sample_Library/PCB_Fault_Detection/dataset_analysis.py
+++ b/Paddle_Industry_Practice_Sample_Library/PCB_Fault_Detection/dataset_analysis.py
@@ -17,8 +17,8 @@
area_ratios = []
label_count = defaultdict(int)
for anno in data['annotations']:
- hw_ratios.append(anno['bbox'][3]/anno['bbox'][2])
- area_ratios.append(anno['area']/imgs[anno['image_id']]['area'])
+ hw_ratios.append(anno['bbox'][3] / anno['bbox'][2])
+ area_ratios.append(anno['area'] / imgs[anno['image_id']]['area'])
label_count[anno['category_id']] += 1
print(label_count, len(data['annotations']) / len(data['images']))
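For context, the `imgs` lookup used above is assumed to be built from the COCO `images` section along these lines (a sketch; field names assumed):
```python
imgs = {
    im['id']: {'area': im['width'] * im['height']}
    for im in data['images']
}
```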
@@ -27,4 +27,4 @@
plt.show()
plt.hist(area_ratios, bins=100, range=[0, 0.005])
-plt.show()
\ No newline at end of file
+plt.show()
diff --git a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/README.md b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/README.md
old mode 100644
new mode 100755
index 9e4f2497e..689a87049
--- a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/README.md
@@ -16,7 +16,7 @@
-**If you find this case helpful, please Star the repo so it is easy to find again; link here:**
+**If you find this case helpful, please Star the repo so it is easy to find again; link here:**
[https://github.com/PaddlePaddle/awesome-DeepLearning](https://github.com/PaddlePaddle/awesome-DeepLearning)
# 1. Solution design
@@ -26,7 +26,7 @@
-# 2. Environment setup and preparation
+# 2. Environment setup and preparation
Install PaddlePaddle: see the [quick install guide](https://www.paddlepaddle.org.cn/install/quick); AI Studio comes with PaddlePaddle pre-installed.
@@ -54,26 +54,26 @@ pip install paddlehub==2.0.4 -i https://pypi.tuna.tsinghua.edu.cn/simple
The data is split into training, validation, and test sets: 1500 images per peach class for training, 100 per class for validation, and 15 per class for testing.
```
-├─data: dataset directory
-    ├─train_list.txt: training-set data list
-    ├─test_list.txt: test-set data list
-    ├─validate_list.txt: validation-set data list
-    ├─label_list.txt: label list
-    └─……
+├─data: dataset directory
+    ├─train_list.txt: training-set data list
+    ├─test_list.txt: test-set data list
+    ├─validate_list.txt: validation-set data list
+    ├─label_list.txt: label list
+    └─……
```
The data-list files for the training, validation, and test sets use the following format, with columns separated by a space.
```
-image1_path image1_label
-image2_path image2_label
+image1_path image1_label
+image2_path image2_label
...
```
label_list.txt has the following format:
```
-class1_name
-class2_name
+class1_name
+class2_name
...
```
@@ -96,7 +96,7 @@ import paddlehub as hub
class DemoDataset(paddle.io.Dataset):
- def __init__(self, transforms, num_classes=4, mode='train'):
+ def __init__(self, transforms, num_classes=4, mode='train'):
# Location of the dataset
self.dataset_dir = "./work/peach-classification" #dataset_dir is the actual dataset path; fill in the full path
self.transforms = transforms
@@ -109,16 +109,16 @@ class DemoDataset(paddle.io.Dataset):
self.file = 'test_list.txt'
else:
self.file = 'validate_list.txt'
-
+
self.file = os.path.join(self.dataset_dir , self.file)
self.data = []
-
+
with open(self.file, 'r') as f:
for line in f.readlines():
line = line.strip()
if line != '':
self.data.append(line)
-
+
def __getitem__(self, idx):
img_path, grt = self.data[idx].split(' ')
img_path = os.path.join(self.dataset_dir, img_path)
@@ -183,7 +183,7 @@ import paddlehub as hub
model = hub.Module(name='resnet50_vd_imagenet_ssld', label_list=["R0", "B1", "M2", "S3"])
```
-# **5. Model training**
+# **5. Model training**
The full training process consists of: building the training set, specifying the optimizer, and fine-tuning the ResNet50 model. The command is as follows:
@@ -286,4 +286,4 @@ data =r.json()["results"]['data']
**Data source**
-The dataset for this case comes from: https://aistudio.baidu.com/aistudio/datasetdetail/67225
\ No newline at end of file
+The dataset for this case comes from: https://aistudio.baidu.com/aistudio/datasetdetail/67225
diff --git a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/dataset.py b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/dataset.py
old mode 100644
new mode 100755
index 6fe3128bd..bb616c01f
--- a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/dataset.py
+++ b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/dataset.py
@@ -6,7 +6,7 @@
class DemoDataset(paddle.io.Dataset):
- def __init__(self, transforms, num_classes=4, mode='train'):
+ def __init__(self, transforms, num_classes=4, mode='train'):
# Location of the dataset
self.dataset_dir = "./dataset/peach-classification" #dataset_dir is the actual dataset path; fill in the full path
self.transforms = transforms
@@ -19,22 +19,21 @@ def __init__(self, transforms, num_classes=4, mode='train'):
self.file = 'test_list.txt'
else:
self.file = 'validate_list.txt'
-
- self.file = os.path.join(self.dataset_dir , self.file)
+
+ self.file = os.path.join(self.dataset_dir, self.file)
self.data = []
-
+
with open(self.file, 'r') as f:
for line in f.readlines():
line = line.strip()
if line != '':
self.data.append(line)
-
+
def __getitem__(self, idx):
img_path, grt = self.data[idx].split(' ')
img_path = os.path.join(self.dataset_dir, img_path)
im = self.transforms(img_path)
return im, int(grt)
-
def __len__(self):
return len(self.data)
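A hypothetical usage sketch of this class, with the transform pipeline mirroring the training script shown later in this patch (import path assumed):
```python
import paddlehub.vision.transforms as T
from dataset import DemoDataset

transforms = T.Compose(
    [T.Resize((256, 256)), T.CenterCrop(224),
     T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])],
    to_rgb=True)
peach_train = DemoDataset(transforms)            # reads train_list.txt
peach_val = DemoDataset(transforms, mode='val')  # reads validate_list.txt
im, label = peach_train[0]                       # transformed image and int label
```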
diff --git a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/test.py b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/test.py
old mode 100644
new mode 100755
index c295d6364..77eb770d2
--- a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/test.py
+++ b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/test.py
@@ -5,7 +5,8 @@
def predict():
- model = hub.Module(name='resnet50_vd_imagenet_ssld', label_list=["R0", "B1", "M2", "S3"])
+ model = hub.Module(
+ name='resnet50_vd_imagenet_ssld', label_list=["R0", "B1", "M2", "S3"])
img_path = './dataset/test.jpg'
img = Image.open(img_path)
plt.imshow(img)
diff --git a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/train.py b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/train.py
old mode 100644
new mode 100755
index 5c9ed0433..e31da9e79
--- a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/train.py
@@ -8,19 +8,31 @@
def train():
transforms = T.Compose(
- [T.Resize((256, 256)),
- T.CenterCrop(224),
- T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])],
- to_rgb=True)
+ [
+ T.Resize((256, 256)), T.CenterCrop(224), T.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ ],
+ to_rgb=True)
peach_train = DemoDataset(transforms)
- peach_validate = DemoDataset(transforms, mode='val')
-
- model = hub.Module(name='resnet50_vd_imagenet_ssld', label_list=["R0", "B1", "M2", "S3"])
-
- optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
- trainer = Trainer(model, optimizer, checkpoint_dir='img_classification_ckpt', use_gpu=True)
- trainer.train(peach_train, epochs=10, batch_size=16, eval_dataset=peach_validate, save_interval=1)
+ peach_validate = DemoDataset(transforms, mode='val')
+
+ model = hub.Module(
+ name='resnet50_vd_imagenet_ssld', label_list=["R0", "B1", "M2", "S3"])
+
+ optimizer = paddle.optimizer.Adam(
+ learning_rate=0.001, parameters=model.parameters())
+ trainer = Trainer(
+ model,
+ optimizer,
+ checkpoint_dir='img_classification_ckpt',
+ use_gpu=True)
+ trainer.train(
+ peach_train,
+ epochs=10,
+ batch_size=16,
+ eval_dataset=peach_validate,
+ save_interval=1)
if __name__ == '__main__':
diff --git a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/val.py b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/val.py
old mode 100644
new mode 100755
index ca0d25541..be23224fe
--- a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/val.py
+++ b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/code/val.py
@@ -8,18 +8,26 @@
def valid():
transforms = T.Compose(
- [T.Resize((256, 256)),
- T.CenterCrop(224),
- T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])],
- to_rgb=True)
+ [
+ T.Resize((256, 256)), T.CenterCrop(224), T.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ ],
+ to_rgb=True)
peach_test = DemoDataset(transforms, mode='test')
- model = hub.Module(name='resnet50_vd_imagenet_ssld', label_list=["R0", "B1", "M2", "S3"])
+ model = hub.Module(
+ name='resnet50_vd_imagenet_ssld', label_list=["R0", "B1", "M2", "S3"])
- optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
- trainer = Trainer(model, optimizer, checkpoint_dir='img_classification_ckpt', use_gpu=True)
+ optimizer = paddle.optimizer.Adam(
+ learning_rate=0.001, parameters=model.parameters())
+ trainer = Trainer(
+ model,
+ optimizer,
+ checkpoint_dir='img_classification_ckpt',
+ use_gpu=True)
trainer.evaluate(peach_test, 16)
+
if __name__ == '__main__':
valid()
diff --git a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/dataset/test.jpg b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/dataset/test.jpg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/imgs/1.jpg b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/imgs/1.jpg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/imgs/2.png b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/imgs/2.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Peach_Classify/imgs/3.jpg b/Paddle_Industry_Practice_Sample_Library/Peach_Classify/imgs/3.jpg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/README.md b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/README.md
old mode 100644
new mode 100755
index 5bd5bf603..6f5ef0f93
--- a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/README.md
@@ -17,7 +17,7 @@
-## 2. Technical challenges
+## 2. Technical challenges
* **High crowd density easily causes missed detections:** in crowded scenes, people occlude one another, which leads to false and missed detections.
* **Dynamic scenes easily cause re-identification problems:** the model must accurately re-identify pedestrians that reappear after occlusion; otherwise people-counting statistics over a time window are significantly affected.
@@ -26,9 +26,9 @@
## 3. Solution
-People-counting requires detecting the class and location of targets while also recognizing associations between frames, so that the same person in a video is not identified and counted more than once. This case uses the FairMOT model from PaddleDetection's multi-object tracking algorithms to solve the people-counting problem.
+People-counting requires detecting the class and location of targets while also recognizing associations between frames, so that the same person in a video is not identified and counted more than once. This case uses the FairMOT model from PaddleDetection's multi-object tracking algorithms to solve the people-counting problem.
-FairMOT builds on the anchor-free CenterNet detector; fusing shallow and deep features lets the detection and ReID tasks each obtain the features they need, achieving fairness between the two tasks and a higher level of real-time multi-object tracking accuracy.
+FairMOT builds on the anchor-free CenterNet detector; fusing shallow and deep features lets the detection and ReID tasks each obtain the features they need, achieving fairness between the two tasks and a higher level of real-time multi-object tracking accuracy.
For different camera angles (eye-level or overhead) and different crowd densities, this case designs different training approaches:
@@ -345,24 +345,24 @@ python deploy/python/mot_jde_infer.py --model_dir=output_inference/fairmot_dla34
* For more learning materials, see the [PaddlePaddle deep learning platform](https://www.paddlepaddle.org.cn/?fr=paddleEdu_aistudio)
-
+
## 13. Citation
```
@article{zhang2020fair,
- title={FairMOT: On the Fairness of Detection and Re-Identification in Multiple Object Tracking},
- author={Zhang, Yifu and Wang, Chunyu and Wang, Xinggang and Zeng, Wenjun and Liu, Wenyu},
- journal={arXiv preprint arXiv:2004.01888},
- year={2020}
+ title={FairMOT: On the Fairness of Detection and Re-Identification in Multiple Object Tracking},
+ author={Zhang, Yifu and Wang, Chunyu and Wang, Xinggang and Zeng, Wenjun and Liu, Wenyu},
+ journal={arXiv preprint arXiv:2004.01888},
+ year={2020}
}
@InProceedings{Sundararaman_2021_CVPR,
- author={Sundararaman, Ramana and De Almeida Braga, Cedric and Marchand, Eric and Pettre, Julien},
- title={Tracking Pedestrian Heads in Dense Crowd},
- booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
- month={June},
- year={2021},
- pages={3865-3875}
+ author={Sundararaman, Ramana and De Almeida Braga, Cedric and Marchand, Eric and Pettre, Julien},
+ title={Tracking Pedestrian Heads in Dense Crowd},
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+ month={June},
+ year={2021},
+ pages={3865-3875}
}
```
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/centernet_fpn_attention.py b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/centernet_fpn_attention.py
old mode 100644
new mode 100755
index c6eca57e4..d4899293b
--- a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/centernet_fpn_attention.py
+++ b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/centernet_fpn_attention.py
@@ -21,17 +21,41 @@
from ppdet.modeling.layers import ConvNormLayer
from ..shape_spec import ShapeSpec
-
import paddle.nn.functional as F
+
# attention
+
# SGE attention
class BasicConv(nn.Layer):
- def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias_attr=False):
+ def __init__(self,
+ in_planes,
+ out_planes,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ relu=True,
+ bn=True,
+ bias_attr=False):
super(BasicConv, self).__init__()
self.out_channels = out_planes
- self.conv = nn.Conv2D(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias_attr=bias_attr)
- self.bn = nn.BatchNorm2D(out_planes, epsilon=1e-5, momentum=0.01, weight_attr=False, bias_attr=False) if bn else None
+ self.conv = nn.Conv2D(
+ in_planes,
+ out_planes,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups,
+ bias_attr=bias_attr)
+ self.bn = nn.BatchNorm2D(
+ out_planes,
+ epsilon=1e-5,
+ momentum=0.01,
+ weight_attr=False,
+ bias_attr=False) if bn else None
self.relu = nn.ReLU() if relu else None
def forward(self, x):
@@ -45,7 +69,9 @@ def forward(self, x):
class ChannelPool(nn.Layer):
def forward(self, x):
- return paddle.concat((paddle.max(x,1).unsqueeze(1), paddle.mean(x,1).unsqueeze(1)), axis=1)
+ return paddle.concat(
+ (paddle.max(x, 1).unsqueeze(1), paddle.mean(x, 1).unsqueeze(1)),
+ axis=1)
class SpatialGate(nn.Layer):
@@ -53,13 +79,21 @@ def __init__(self):
super(SpatialGate, self).__init__()
kernel_size = 7
self.compress = ChannelPool()
- self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)
- print(f'************************************ use SpatialGate ************************************')
+ self.spatial = BasicConv(
+ 2,
+ 1,
+ kernel_size,
+ stride=1,
+ padding=(kernel_size - 1) // 2,
+ relu=False)
+ print(
+ f'************************************ use SpatialGate ************************************'
+ )
def forward(self, x):
x_compress = self.compress(x)
x_out = self.spatial(x_compress)
- scale = F.sigmoid(x_out) # broadcasting
+ scale = F.sigmoid(x_out) # broadcasting
return x * scale
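As a minimal sketch of what `ChannelPool` plus `SpatialGate` compute (hypothetical tensor sizes):
```python
import paddle

x = paddle.rand([2, 64, 32, 32])
pooled = paddle.concat(
    (paddle.max(x, 1).unsqueeze(1), paddle.mean(x, 1).unsqueeze(1)), axis=1)
print(pooled.shape)  # [2, 2, 32, 32]; the 7x7 conv then squashes this to a 1-channel gate
```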
@@ -73,9 +107,11 @@ def autopad(k, p=None): # kernel, padding
class Conv(nn.Layer):
# Standard convolution
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1,
+ act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Conv, self).__init__()
- self.conv = nn.Conv2D(c1, c2, k, s, autopad(k, p), groups=g, bias_attr=False)
+ self.conv = nn.Conv2D(
+ c1, c2, k, s, autopad(k, p), groups=g, bias_attr=False)
self.bn = nn.BatchNorm2D(c2)
self.act = nn.LeakyReLU(0.1) if act else nn.Identity()
@@ -87,9 +123,11 @@ def fuseforward(self, x):
class SANN_Attention(nn.Layer):
- def __init__(self, k_size = 3, ch = 64, s_state = False, c_state = False):
+ def __init__(self, k_size=3, ch=64, s_state=False, c_state=False):
super(SANN_Attention, self).__init__()
- print(f'************************************use SANN_Attention s_state => {s_state} -- c_state => {c_state}')
+ print(
+ f'************************************use SANN_Attention s_state => {s_state} -- c_state => {c_state}'
+ )
self.avg_pool = nn.AdaptiveAvgPool2D(1)
self.max_pool = nn.AdaptiveAvgPool2D(1)
self.sigmoid = nn.Sigmoid()
@@ -97,10 +135,17 @@ def __init__(self, k_size = 3, ch = 64, s_state = False, c_state = False):
self.c_state = c_state
if c_state:
- self.c_attention = nn.Sequential(nn.Conv1D(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias_attr=False),
- nn.LayerNorm([1, ch]),
- nn.LeakyReLU(0.3),
- nn.Linear(ch, ch, bias_attr=False))
+ self.c_attention = nn.Sequential(
+ nn.Conv1D(
+ 1,
+ 1,
+ kernel_size=k_size,
+ padding=(k_size - 1) // 2,
+ bias_attr=False),
+ nn.LayerNorm([1, ch]),
+ nn.LeakyReLU(0.3),
+ nn.Linear(
+ ch, ch, bias_attr=False))
if s_state:
self.conv_s = nn.Sequential(Conv(ch, ch // 4, k=1))
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/centernet_head_iou_head.py b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/centernet_head_iou_head.py
old mode 100644
new mode 100755
index f67c84439..67eae0c9e
--- a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/centernet_head_iou_head.py
+++ b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/centernet_head_iou_head.py
@@ -143,11 +143,17 @@ def forward(self, feat, inputs):
iou = self.iou(feat)
offset = self.offset(feat)
if self.training:
- loss = self.get_loss(heatmap, size, iou, offset, self.weights, inputs)
+ loss = self.get_loss(heatmap, size, iou, offset, self.weights,
+ inputs)
return loss
else:
heatmap = F.sigmoid(heatmap)
- return {'heatmap': heatmap, 'size': size, 'iou':iou, 'offset': offset}
+ return {
+ 'heatmap': heatmap,
+ 'size': size,
+ 'iou': iou,
+ 'offset': offset
+ }
def get_loss(self, heatmap, size, iou, offset, weights, inputs):
heatmap_target = inputs['heatmap']
@@ -191,14 +197,14 @@ def get_loss(self, heatmap, size, iou, offset, weights, inputs):
iou_mask.stop_gradient = True
gt_bbox_xys = inputs['bbox_xys']
gt_bbox_xys.stop_gradient = True
- centers_x = (gt_bbox_xys[:,:,0:1] + gt_bbox_xys[:,:,2:3]) / 2.0
- centers_y = (gt_bbox_xys[:,:,1:2] + gt_bbox_xys[:,:,3:4]) / 2.0
- x1 = centers_x - pos_size[:,:,0:1]
- y1 = centers_y - pos_size[:,:,1:2]
- x2 = centers_x + pos_size[:,:,2:3]
- y2 = centers_y + pos_size[:,:,3:4]
+ centers_x = (gt_bbox_xys[:, :, 0:1] + gt_bbox_xys[:, :, 2:3]) / 2.0
+ centers_y = (gt_bbox_xys[:, :, 1:2] + gt_bbox_xys[:, :, 3:4]) / 2.0
+ x1 = centers_x - pos_size[:, :, 0:1]
+ y1 = centers_y - pos_size[:, :, 1:2]
+ x2 = centers_x + pos_size[:, :, 2:3]
+ y2 = centers_y + pos_size[:, :, 3:4]
pred_boxes = paddle.concat([x1, y1, x2, y2], axis=-1)
-
+
iou_loss = self.iou_loss(
pred_boxes * iou_mask,
gt_bbox_xys * iou_mask,
@@ -221,7 +227,9 @@ def get_loss(self, heatmap, size, iou, offset, weights, inputs):
reduction='sum')
offset_loss = offset_loss / (pos_num + 1e-4)
- det_loss = weights['heatmap'] * heatmap_loss + weights['size'] * size_loss + weights['offset'] * offset_loss + weights['iou'] * iou_loss
+ det_loss = weights['heatmap'] * heatmap_loss + weights[
+ 'size'] * size_loss + weights['offset'] * offset_loss + weights[
+ 'iou'] * iou_loss
return {
'det_loss': det_loss,
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/dla_backbones.py b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/dla_backbones.py
old mode 100644
new mode 100755
index 00a238f95..e2cc914e3
--- a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/dla_backbones.py
+++ b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/code/dla_backbones.py
@@ -21,11 +21,11 @@
from ..shape_spec import ShapeSpec
DLA_cfg = {
- 34: ([1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512]),
- 46: ([1, 1, 1, 2, 2, 1], [16, 32, 64, 64, 128, 256]),
- 60: ([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024]),
- 102: ([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024])
- }
+ 34: ([1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512]),
+ 46: ([1, 1, 1, 2, 2, 1], [16, 32, 64, 64, 128, 256]),
+ 60: ([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024]),
+ 102: ([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024])
+}
class BasicBlock(nn.Layer):
@@ -48,25 +48,24 @@ def __init__(self, ch_in, ch_out, stride=1):
def forward(self, inputs, residual=None):
if residual is None:
- residual = inputs
+ residual = inputs
- out = self.conv1(inputs)
+ out = self.conv1(inputs)
out = F.relu(out)
- out = self.conv2(out)
+ out = self.conv2(out)
out = paddle.add(x=out, y=residual)
out = F.relu(out)
return out
+
class Bottleneck(nn.Layer):
expansion = 2
-
+
def __init__(self, ch_in, ch_out, stride=1, base_width=64, cardinality=1):
super(Bottleneck, self).__init__()
self.stride = stride
- mid_planes = int(
- math.floor(ch_out * (base_width / 64)) * cardinality
- )
+ mid_planes = int(math.floor(ch_out * (base_width / 64)) * cardinality)
mid_planes = mid_planes // self.expansion
self.conv1 = ConvNormLayer(
@@ -90,17 +89,17 @@ def __init__(self, ch_in, ch_out, stride=1, base_width=64, cardinality=1):
stride=1,
bias_on=False,
norm_decay=None)
-
+
def forward(self, inputs, residual=True):
if residual is None:
- residual = inputs
- out = self.conv1(inputs)
+ residual = inputs
+ out = self.conv1(inputs)
out = F.relu(out)
out = self.conv2(out)
out = F.relu(out)
- out = self.conv3(out)
+ out = self.conv3(out)
out += residual
- out = F.relu(out)
+ out = F.relu(out)
return out
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/dataset.md b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/dataset.md
old mode 100644
new mode 100755
index b596a4165..097ae2312
--- a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/dataset.md
+++ b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/dataset.md
@@ -2,7 +2,7 @@
**Caltech Pedestrian**
-The Caltech Pedestrian dataset is provided by Caltech and was collected from a camera mounted on a vehicle driving normally in an urban environment. It contains roughly 10 hours of 640x480 30Hz video, with about 350,000 bounding boxes and 2,300 pedestrians annotated across roughly 250,000 frames (about 137 minutes of clips). For more information see the [Caltech Pedestrian Detection Benchmark](http://www.vision.caltech.edu/Image_Datasets/CaltechPedestrians/)
+The Caltech Pedestrian dataset is provided by Caltech and was collected from a camera mounted on a vehicle driving normally in an urban environment. It contains roughly 10 hours of 640x480 30Hz video, with about 350,000 bounding boxes and 2,300 pedestrians annotated across roughly 250,000 frames (about 137 minutes of clips). For more information see the [Caltech Pedestrian Detection Benchmark](http://www.vision.caltech.edu/Image_Datasets/CaltechPedestrians/)

@@ -80,4 +80,3 @@ MOT17 is the same dataset as MOT16 but with more accurate annotations. For more information see: [
6. MOT16: https://motchallenge.net/data/MOT16/
7. MOT17: https://motchallenge.net/data/MOT17/
8. Head Tracking 21: https://motchallenge.net/data/Head_Tracking_21
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/caltech.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/caltech.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/citypersons.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/citypersons.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/cuhk_sysu.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/cuhk_sysu.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/ethz.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/ethz.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/mot16.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/mot16.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/prw.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/dataset/prw.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/demo.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/demo.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/deploy.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/deploy.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/ht_fairmot.gif b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/ht_fairmot.gif
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/optimization/cutmix.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/optimization/cutmix.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/optimization/dcn.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/optimization/dcn.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/optimization/dla.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/optimization/dla.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/optimization/spatial_attention_module.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/optimization/spatial_attention_module.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/pedestrian_detection.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/pedestrian_detection.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/procedure.png b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/images/procedure.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/improvements.md b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/improvements.md
old mode 100644
new mode 100755
index 5cad1888b..1b6f89c28
--- a/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/improvements.md
+++ b/Paddle_Industry_Practice_Sample_Library/Pedestrian_Detection_and_Tracking/improvements.md
@@ -53,9 +53,9 @@ TrainReader:
- LetterBoxResize: {target_size: [608, 1088]}
- MOTRandomAffine: {reject_outside: False}
- RandomFlip: {}
-
+
- Cutmix: {}
-
+
- BboxXYXY2XYWH: {}
- NormalizeBox: {}
- NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1]}
@@ -207,7 +207,7 @@ class DLA(nn.Layer):
"""
def __init__(self, depth=34, residual_root=False):
-
+
```
@@ -231,7 +231,7 @@ GIoU defines a computation: for two boxes A and B, first find their smallest
$$
GIoU = IoU - \frac{C-(A \cup B)}{C}
$$
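Purely as a reference for the formula above, a self-contained NumPy-style sketch of GIoU for two axis-aligned boxes (illustrative; not the PaddleDetection implementation):
```python
def giou(a, b):
    # boxes as [x1, y1, x2, y2]
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    # C: the smallest box enclosing both A and B
    c = ((max(a[2], b[2]) - min(a[0], b[0])) *
         (max(a[3], b[3]) - min(a[1], b[1])))
    return inter / union - (c - union) / c

print(giou([0, 0, 2, 2], [1, 1, 3, 3]))  # ~-0.0794 (IoU 1/7 minus gap penalty)
```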
-GIoU Loss = 1 - GIoU. To try adding the GIoU Loss, replace the code in `ppdet/modeling/heads/centernet_head.py` with `code/centernet_head_iou_head.py`, and modify `ppdet/modeling/architectures/fairmot.py` by adding `'iou_loss': det_outs['iou_loss'],` at line 84:
+GIoU Loss = 1 - GIoU. To try adding the GIoU Loss, replace the code in `ppdet/modeling/heads/centernet_head.py` with `code/centernet_head_iou_head.py`, and modify `ppdet/modeling/architectures/fairmot.py` by adding `'iou_loss': det_outs['iou_loss'],` at line 84:
```python
det_loss = det_outs['det_loss']
@@ -275,4 +275,3 @@ return loss
| dla46c 4gpu bs8 momentum + imagenet_pretrain | 61.2 | 16.863 |
| dla60 4gpu bs8 momentum + imagenet_pretrain | 58.8 | 12.531 |
| dla102 4gpu bs8 momentum + imagenet_pretrain | 54.8 | 12.469 |
-
diff --git a/Paddle_Industry_Practice_Sample_Library/README.md b/Paddle_Industry_Practice_Sample_Library/README.md
old mode 100644
new mode 100755
index 305c73789..e7e149bf2
--- a/Paddle_Industry_Practice_Sample_Library/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/README.md
@@ -19,4 +19,3 @@
You are welcome to sign up for the live course and join the discussion group; for more technical exchange and cooperation, use the link below:
[https://paddleqiyeban.wjx.cn/vj/Qlb0uS3.aspx?udsid=531417](https://paddleqiyeban.wjx.cn/vj/Qlb0uS3.aspx?udsid=531417)
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/README.md b/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/README.md
old mode 100644
new mode 100755
index 08237cabb..d42050db0
--- a/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/README.md
@@ -91,8 +91,8 @@ cd ./pretrain_models/ && tar xf ch_ppocr_mobile_v2.0_det_train.tar
```
./pretrain_models/ch_ppocr_mobile_v2.0_det_train/
- └─ best_accuracy.pdopt
- └─ best_accuracy.pdparams
+ └─ best_accuracy.pdopt
+ └─ best_accuracy.pdparams
└─ best_accuracy.states
```
@@ -102,15 +102,15 @@ cd ./pretrain_models/ && tar xf ch_ppocr_mobile_v2.0_det_train.tar
```
Global:
- └─pretrained_model:./pretrain_models/ch_ppocr_mobile_v2.0_det_train/best_accuracy
+ └─pretrained_model:./pretrain_models/ch_ppocr_mobile_v2.0_det_train/best_accuracy
Train:
- └─dataset
- └─data_dir:path/to/your/dataset
- └─label_file_list:path/to/your/dataset/label.txt
+ └─dataset
+ └─data_dir:path/to/your/dataset
+ └─label_file_list:path/to/your/dataset/label.txt
Eval:
- └─dataset
- └─data_dir:path/to/your/dataset
- └─label_file_list:path/to/your/dataset/label.txt
+ └─dataset
+ └─data_dir:path/to/your/dataset
+ └─label_file_list:path/to/your/dataset/label.txt
```
**Note:**
@@ -259,5 +259,3 @@ python DocRec.py
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/code/DocRec.py b/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/code/DocRec.py
old mode 100644
new mode 100755
index 1ab79360b..c1bac2fef
--- a/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/code/DocRec.py
+++ b/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/code/DocRec.py
@@ -6,9 +6,9 @@
import matplotlib, os
-
# Named entity recognition and word-frequency statistics
+
def deletaNum(doc):
return [i for i in doc if len(i) > 1]
@@ -39,7 +39,8 @@ def PlotHist(counter):
cnt[key] = counter[key]
print(cnt)
plt.figure(figsize=(10, 5))
- plt.bar(range(len(cnt)), cnt.values(), tick_label=list(cnt.keys())) # , orientation="horizontal"
+ plt.bar(range(len(cnt)), cnt.values(),
+ tick_label=list(cnt.keys())) # , orientation="horizontal"
# for a, b in zip(x_list, y_list):
# plt.text(a, b + 0.05, '%.0f' % b, ha='center', va='bottom', fontsize=10)
plt.xticks(rotation=45)
@@ -49,8 +50,9 @@ def PlotHist(counter):
if __name__ == '__main__':
# The model directory must contain the model and params files
- ocr = PaddleOCR(det_model_dir='./PaddleOCR/output/ch_db_mv3_inference/inference',
- use_angle_cls=True)
+ ocr = PaddleOCR(
+ det_model_dir='./PaddleOCR/output/ch_db_mv3_inference/inference',
+ use_angle_cls=True)
lac = Taskflow("pos_tagging")
enti_list = []
@@ -72,4 +74,3 @@ def PlotHist(counter):
print('Entity results:', counter)
PlotHist(counter)
-
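As a quick reference for the pipeline above: `Taskflow("pos_tagging")` returns `(word, POS tag)` pairs, which DocRec.py filters into its entity list. A small sketch (the sample sentence and output are illustrative):

```python
from paddlenlp import Taskflow

# pos_tagging Taskflow as used in DocRec.py; returns (word, tag) pairs.
lac = Taskflow("pos_tagging")
print(lac("今天天气不错"))  # e.g. [('今天', 'TIME'), ('天气', 'n'), ('不错', 'a')]
```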
diff --git a/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/code/splitPDF.py b/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/code/splitPDF.py
old mode 100644
new mode 100755
index d75b7694c..e28d828aa
--- a/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/code/splitPDF.py
+++ b/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/code/splitPDF.py
@@ -1,6 +1,7 @@
# Split the PDF
import os, fitz, time
+
def pdf2png(pdfPath, imgPath, zoom_x=2, zoom_y=2, rotation_angle=0):
'''
# Convert the PDF into images
@@ -21,22 +22,24 @@ def pdf2png(pdfPath, imgPath, zoom_x=2, zoom_y=2, rotation_angle=0):
for pg in range(0, pdf.pageCount):
page = pdf[pg]
# Set the zoom and rotation factors
- trans = fitz.Matrix(zoom_x, zoom_y) #.preRotate(rotation_angle)
+ trans = fitz.Matrix(zoom_x, zoom_y) #.preRotate(rotation_angle)
pm = page.getPixmap(matrix=trans, alpha=False)
- if pm.width>2000 or pm.height>2000:
+ if pm.width > 2000 or pm.height > 2000:
pm = page.getPixmap(matrix=fitz.Matrix(1, 1), alpha=False)
pm.writePNG(imgPath + str(pg) + ".jpeg")
pdf.close()
time_end = time.time()
time_cost = time_end - time_start
- print('totally cost: {}, page: {}, each page cost: {}'.format(time_cost, pg+1, time_cost/(pg+1)))
+ print('totally cost: {}, page: {}, each page cost: {}'.format(
+ time_cost, pg + 1, time_cost / (pg + 1)))
+
if __name__ == '__main__':
pdfFolder = 'ResearchReport'
for p in os.listdir(pdfFolder):
- pdfPath = pdfFolder+'/'+p
- imgPath = pdfFolder+'/'+os.path.basename(p)[:-4]+'/'
+ pdfPath = pdfFolder + '/' + p
+ imgPath = pdfFolder + '/' + os.path.basename(p)[:-4] + '/'
print(imgPath)
os.mkdir(imgPath)
- pdf2png(pdfPath, imgPath)
\ No newline at end of file
+ pdf2png(pdfPath, imgPath)
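Note that splitPDF.py uses PyMuPDF's legacy camelCase API (`pageCount`, `getPixmap`, `writePNG`), which recent releases deprecate in favor of snake_case names. A rough modern equivalent, assuming a current `fitz` build:

```python
import fitz  # PyMuPDF

def pdf2png_modern(pdf_path, img_dir, zoom=2):
    """Sketch of the same conversion with the snake_case PyMuPDF API."""
    pdf = fitz.open(pdf_path)
    mat = fitz.Matrix(zoom, zoom)  # x/y zoom factors, as in the original
    for pg in range(pdf.page_count):                       # was pdf.pageCount
        pix = pdf[pg].get_pixmap(matrix=mat, alpha=False)  # was getPixmap
        pix.save(f"{img_dir}/{pg}.jpeg")                   # was writePNG
    pdf.close()
```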
diff --git a/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/imgs/img.png b/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/imgs/img.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/imgs/pipeline.png b/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/imgs/pipeline.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/imgs/ppocr_framework.png b/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/imgs/ppocr_framework.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/imgs/result.jpeg b/Paddle_Industry_Practice_Sample_Library/Report_Recognition_and_Analysis/imgs/result.jpeg
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Semantic_Matching/README.md b/Paddle_Industry_Practice_Sample_Library/Semantic_Matching/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Semantic_Matching/code/predict.py b/Paddle_Industry_Practice_Sample_Library/Semantic_Matching/code/predict.py
old mode 100644
new mode 100755
index c757a28f0..6b73d732b
--- a/Paddle_Industry_Practice_Sample_Library/Semantic_Matching/code/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/Semantic_Matching/code/predict.py
@@ -6,14 +6,15 @@
import paddle.nn.functional as F
import paddle.nn as nn
-
from paddlenlp.datasets import load_dataset
import paddlenlp
from paddlenlp.data import Stack, Pad, Tuple
from paddlenlp.transformers import LinearDecayWithWarmup
from functools import partial
-tokenizer = paddlenlp.transformers.ErnieGramTokenizer.from_pretrained('ernie-gram-zh')
+tokenizer = paddlenlp.transformers.ErnieGramTokenizer.from_pretrained(
+ 'ernie-gram-zh')
+
def convert_example(example, tokenizer, max_seq_length=512, is_test=False):
@@ -34,7 +35,7 @@ def convert_example(example, tokenizer, max_seq_length=512, is_test=False):
class PointwiseMatching(nn.Layer):
-
+
# Here, pretrained_model is initialized with the ERNIE-Gram pretrained model
def __init__(self, pretrained_model, dropout=None):
super().__init__()
@@ -64,8 +65,9 @@ def forward(self,
return probs
+
def predict(model, data_loader):
-
+
batch_probs = []
# Switch to eval mode for prediction; ops such as dropout are disabled
@@ -76,7 +78,7 @@ def predict(model, data_loader):
input_ids, token_type_ids = batch_data
input_ids = paddle.to_tensor(input_ids)
token_type_ids = paddle.to_tensor(token_type_ids)
-
+
# Get the prediction probabilities for each sample: a [batch_size, 2] matrix
batch_prob = model(
input_ids=input_ids, token_type_ids=token_type_ids).numpy()
@@ -90,10 +92,7 @@ def predict(model, data_loader):
# Conversion function for the predict data
# predict data carries no label, so convert_example's is_test argument is set to True
trans_func = partial(
- convert_example,
- tokenizer=tokenizer,
- max_seq_length=512,
- is_test=True)
+ convert_example, tokenizer=tokenizer, max_seq_length=512, is_test=True)
# Batching of the predict data
# predict samples yield only input_ids and token_type_ids, so two Pad objects suffice for batchify_fn
@@ -108,17 +107,19 @@ def predict(model, data_loader):
batch_sampler = paddle.io.BatchSampler(test_ds, batch_size=32, shuffle=False)
# Build the prediction data_loader
-predict_data_loader =paddle.io.DataLoader(
- dataset=test_ds.map(trans_func),
- batch_sampler=batch_sampler,
- collate_fn=batchify_fn,
- return_list=True)
+predict_data_loader = paddle.io.DataLoader(
+ dataset=test_ds.map(trans_func),
+ batch_sampler=batch_sampler,
+ collate_fn=batchify_fn,
+ return_list=True)
-pretrained_model = paddlenlp.transformers.ErnieGramModel.from_pretrained('ernie-gram-zh')
+pretrained_model = paddlenlp.transformers.ErnieGramModel.from_pretrained(
+ 'ernie-gram-zh')
model = PointwiseMatching(pretrained_model)
-state_dict = paddle.load("./ernie_gram_zh_pointwise_matching_model/model_state.pdparams")
+state_dict = paddle.load(
+ "./ernie_gram_zh_pointwise_matching_model/model_state.pdparams")
model.set_dict(state_dict)
@@ -132,12 +133,11 @@ def predict(model, data_loader):
y_preds = np.argmax(y_probs, axis=1)
with open("lcqmc.tsv", 'w', encoding="utf-8") as f:
- f.write("index\tprediction\n")
+ f.write("index\tprediction\n")
for idx, y_pred in enumerate(y_preds):
f.write("{}\t{}\n".format(idx, y_pred))
text_pair = test_ds[idx]
text_pair["label"] = y_pred
# print(text_pair)
-# Print one of the entries
+    # Print one of the entries
print(text_pair)
-
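The comments above note that predict data yields only `input_ids` and `token_type_ids`, so `batchify_fn` needs just two `Pad` objects. The definition sits outside the hunk; it plausibly looks like this (a sketch reusing the `tokenizer` loaded earlier in the file):

```python
from paddlenlp.data import Pad, Tuple

# Two Pad objects only: no label field in predict samples, so no Stack().
batchify_fn = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=tokenizer.pad_token_id),       # input_ids
    Pad(axis=0, pad_val=tokenizer.pad_token_type_id),  # token_type_ids
): [data for data in fn(samples)]
```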
diff --git a/Paddle_Industry_Practice_Sample_Library/Semantic_Matching/code/train.py b/Paddle_Industry_Practice_Sample_Library/Semantic_Matching/code/train.py
old mode 100644
new mode 100755
index b5552ae74..319f2833d
--- a/Paddle_Industry_Practice_Sample_Library/Semantic_Matching/code/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/Semantic_Matching/code/train.py
@@ -6,7 +6,6 @@
import paddle.nn.functional as F
import paddle.nn as nn
-
from paddlenlp.datasets import load_dataset
import paddlenlp
from paddlenlp.data import Stack, Pad, Tuple
@@ -15,7 +14,9 @@
train_ds, dev_ds = load_dataset("lcqmc", splits=["train", "dev"])
-tokenizer = paddlenlp.transformers.ErnieGramTokenizer.from_pretrained('ernie-gram-zh')
+tokenizer = paddlenlp.transformers.ErnieGramTokenizer.from_pretrained(
+ 'ernie-gram-zh')
+
def convert_example(example, tokenizer, max_seq_length=512, is_test=False):
@@ -35,12 +36,8 @@ def convert_example(example, tokenizer, max_seq_length=512, is_test=False):
return input_ids, token_type_ids
-
# Sample conversion function for the training and dev sets
-trans_func = partial(
- convert_example,
- tokenizer=tokenizer,
- max_seq_length=512)
+trans_func = partial(convert_example, tokenizer=tokenizer, max_seq_length=512)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input_ids
@@ -48,30 +45,30 @@ def convert_example(example, tokenizer, max_seq_length=512, is_test=False):
Stack(dtype="int64") # label
): [data for data in fn(samples)]
-
-
# Define the distributed Sampler: it automatically shards the training data and supports multi-card parallel training
-batch_sampler = paddle.io.DistributedBatchSampler(train_ds, batch_size=32, shuffle=True)
+batch_sampler = paddle.io.DistributedBatchSampler(
+ train_ds, batch_size=32, shuffle=True)
# Define train_data_loader on top of train_ds
# Since we use the distributed DistributedBatchSampler, train_data_loader shards the training data automatically
train_data_loader = paddle.io.DataLoader(
- dataset=train_ds.map(trans_func),
- batch_sampler=batch_sampler,
- collate_fn=batchify_fn,
- return_list=True)
+ dataset=train_ds.map(trans_func),
+ batch_sampler=batch_sampler,
+ collate_fn=batchify_fn,
+ return_list=True)
# For dev-set loading we evaluate on a single card, so paddle.io.BatchSampler is sufficient
# Define dev_data_loader
batch_sampler = paddle.io.BatchSampler(dev_ds, batch_size=32, shuffle=False)
dev_data_loader = paddle.io.DataLoader(
- dataset=dev_ds.map(trans_func),
- batch_sampler=batch_sampler,
- collate_fn=batchify_fn,
- return_list=True)
+ dataset=dev_ds.map(trans_func),
+ batch_sampler=batch_sampler,
+ collate_fn=batchify_fn,
+ return_list=True)
+
class PointwiseMatching(nn.Layer):
-
+
# Here, pretrained_model is initialized with the ERNIE-Gram pretrained model
def __init__(self, pretrained_model, dropout=None):
super().__init__()
@@ -101,14 +98,15 @@ def forward(self,
return probs
+
# We build the point-wise semantic matching network on the ERNIE-Gram model structure,
# so we first define the ERNIE-Gram pretrained_model here
-pretrained_model = paddlenlp.transformers.ErnieGramModel.from_pretrained('ernie-gram-zh')
+pretrained_model = paddlenlp.transformers.ErnieGramModel.from_pretrained(
+ 'ernie-gram-zh')
#pretrained_model = paddlenlp.transformers.ErnieModel.from_pretrained('ernie-1.0')
# Define the point-wise semantic matching network
model = PointwiseMatching(pretrained_model)
-
epochs = 1
num_training_steps = len(train_data_loader) * epochs
@@ -154,8 +152,9 @@ def evaluate(model, criterion, metric, data_loader, phase="dev"):
model.train()
metric.reset()
+
# Next, start the actual model training; it takes a while and this part can be commented out
-def do_train(model,train_data_loader,dev_data_loader):
+def do_train(model, train_data_loader, dev_data_loader):
global_step = 0
tic_train = time.time()
@@ -170,13 +169,13 @@ def do_train(model,train_data_loader,dev_data_loader):
acc = metric.accumulate()
global_step += 1
-
+
# Print training metrics every 10 steps
if global_step % 10 == 0:
print(
"global step %d, epoch: %d, batch: %d, loss: %.5f, accu: %.5f, speed: %.2f step/s"
% (global_step, epoch, step, loss, acc,
- 10 / (time.time() - tic_train)))
+ 10 / (time.time() - tic_train)))
tic_train = time.time()
loss.backward()
optimizer.step()
@@ -186,7 +185,7 @@ def do_train(model,train_data_loader,dev_data_loader):
# Evaluate on the dev and test sets every 100 steps
if global_step % 100 == 0:
evaluate(model, criterion, metric, dev_data_loader, "dev")
-
+
# After training finishes, save the model parameters
save_dir = os.path.join("checkpoint", "model_%d" % global_step)
os.makedirs(save_dir)
@@ -195,5 +194,5 @@ def do_train(model,train_data_loader,dev_data_loader):
paddle.save(model.state_dict(), save_param_path)
tokenizer.save_pretrained(save_dir)
-do_train(model,train_data_loader,dev_data_loader)
+do_train(model, train_data_loader, dev_data_loader)
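train.py's hunks reference `LinearDecayWithWarmup`, `num_training_steps`, `criterion`, and `metric`, but their definitions fall outside the diff context. A plausible setup consistent with the imports (the learning rate, warmup proportion, and weight decay values here are assumptions):

```python
import paddle
from paddlenlp.transformers import LinearDecayWithWarmup

# Assumed hyperparameters; only num_training_steps appears in the hunks.
lr_scheduler = LinearDecayWithWarmup(5e-5, num_training_steps, 0.1)
optimizer = paddle.optimizer.AdamW(
    learning_rate=lr_scheduler,
    parameters=model.parameters(),
    weight_decay=0.01)
criterion = paddle.nn.loss.CrossEntropyLoss()
metric = paddle.metric.Accuracy()
```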
diff --git a/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/code/fairmot_hardnet85.yml b/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/code/fairmot_hardnet85.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/code/fairmot_hardnet85_30e_1088x608.yml b/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/code/fairmot_hardnet85_30e_1088x608.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/code/fairmot_hardnet85_30e_1088x608_bdd100k_vehicle.yml b/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/code/fairmot_hardnet85_30e_1088x608_bdd100k_vehicle.yml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/images/BDD100K.png b/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/images/BDD100K.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/images/MOTA.png b/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/images/MOTA.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/images/data_distribution.png b/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/images/data_distribution.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/images/mixup.png b/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/images/mixup.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/images/mixup_formula.png b/Paddle_Industry_Practice_Sample_Library/Vehicle_Detection_and_Tracking/images/mixup_formula.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/README.md b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/README.md
old mode 100644
new mode 100755
index 3462b38fb..282f80ac4
--- a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/README.md
@@ -21,21 +21,21 @@
Extracting the desired key information from logistics text is essentially an entity extraction task in NLP. The main business difficulties of entity extraction are:
-* **Many entity categories, high task complexity**
-The same noun can map to several entity categories, e.g. "snail".
-Many entity words coincide with everyday expressions, especially titles of works.
+* **Many entity categories, high task complexity**
+The same noun can map to several entity categories, e.g. "snail".
+Many entity words coincide with everyday expressions, especially titles of works.
Moreover, the large number of entity categories makes the task fairly complex; see the entity examples below:

Figure 1: Entity examples
-* **Entity words tend to be sparse and low-frequency**
+* **Entity words tend to be sparse and low-frequency**
Person names appear frequently in a corpus, but each individual name occurs only a limited number of times, and most vertical-domain entity words occur rarely in general corpora.
* **Labeled datasets for specific business scenarios are scarce and annotation carries a cost; if dataset quality is poor or the sample size is too small, accuracy will suffer.**
-
+
@@ -234,4 +234,3 @@ python run_erniegram_crf.py
Figure 8: Prediction with the inference library
Paddle Inference performs prediction through a Predictor. The Predictor is a high-performance inference engine: by analyzing the computation graph it applies a series of graph optimizations (such as OP fusion, memory/GPU-memory optimization, and support for low-level acceleration libraries like MKLDNN and TensorRT), which greatly improves inference performance. Paddle Inference also offers APIs in multiple languages (Python, C++, Go, etc.), so you can choose what suits your environment; for developing a Python inference program with Paddle Inference, see this [example](https://github.com/PaddlePaddle/PaddleNLP/blob/develop/examples/information_extraction/waybill_ie/deploy/python/predict.py). The relevant APIs ship with the Paddle package and can be used directly.
-
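To make the Predictor workflow described above concrete, here is a minimal sketch of the Python inference API (model file names and the input shape are placeholders, not the waybill project's actual exports):

```python
import numpy as np
from paddle.inference import Config, create_predictor

# Placeholder file names; the waybill example exports its own model files.
config = Config("inference.pdmodel", "inference.pdiparams")
config.disable_gpu()  # or config.enable_use_gpu(100, 0) for GPU

predictor = create_predictor(config)

# Feed dummy token ids into every input; the (1, 128) shape is illustrative.
for name in predictor.get_input_names():
    handle = predictor.get_input_handle(name)
    handle.copy_from_cpu(np.zeros((1, 128), dtype="int64"))

predictor.run()

output = predictor.get_output_handle(predictor.get_output_names()[0])
print(output.copy_to_cpu().shape)
```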
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/conf/tag.dic b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/conf/tag.dic
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/conf/word.dic b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/conf/word.dic
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/data.py b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/data.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/GRU.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/GRU.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/crf.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/crf.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/dataset1.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/dataset1.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/entity.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/entity.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/ernie1.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/ernie1.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/ernie2.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/ernie2.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/infer.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/infer.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/metric.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/metric.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/rnn1.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/rnn1.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/tag1.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/tag1.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/tag2.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/tag2.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/waybill.png b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/images/waybill.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/model.py b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/model.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_bigru_crf.py b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_bigru_crf.py
old mode 100644
new mode 100755
index 2682e5d58..507d4d780
--- a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_bigru_crf.py
+++ b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_bigru_crf.py
@@ -12,6 +12,7 @@
from paddlenlp.transformers import ErnieGramTokenizer, ErnieGramForTokenClassification
from utils import convert_example
+
def load_dataset(datafiles):
def read(data_path):
with open(data_path, 'r', encoding='utf-8') as fp:
@@ -27,12 +28,15 @@ def read(data_path):
elif isinstance(datafiles, list) or isinstance(datafiles, tuple):
return [MapDataset(list(read(datafile))) for datafile in datafiles]
-train_ds, dev_ds, test_ds = load_dataset(datafiles=(
- './waybill_data/train.txt', './waybill_data/dev.txt', './waybill_data/test.txt'))
+
+train_ds, dev_ds, test_ds = load_dataset(datafiles=('./waybill_data/train.txt',
+ './waybill_data/dev.txt',
+ './waybill_data/test.txt'))
label_vocab = load_dict('./conf/tag.dic')
word_vocab = load_dict('./conf/word.dic')
+
# Convert tokens to ids
def convert_tokens_to_ids(tokens, vocab, oov_token=None):
token_ids = []
@@ -42,12 +46,14 @@ def convert_tokens_to_ids(tokens, vocab, oov_token=None):
token_ids.append(token_id)
return token_ids
+
# Convert text and labels to ids
def convert_example(example):
- tokens, labels = example
- token_ids = convert_tokens_to_ids(tokens, word_vocab, 'OOV')
- label_ids = convert_tokens_to_ids(labels, label_vocab, 'O')
- return token_ids, len(token_ids), label_ids
+ tokens, labels = example
+ token_ids = convert_tokens_to_ids(tokens, word_vocab, 'OOV')
+ label_ids = convert_tokens_to_ids(labels, label_vocab, 'O')
+ return token_ids, len(token_ids), label_ids
+
# Call the built-in map() method for data processing: id conversion
train_ds.map(convert_example)
@@ -55,32 +61,33 @@ def convert_example(example):
test_ds.map(convert_example)
batchify_fn = lambda samples, fn=Tuple(
- Pad(axis=0, pad_val=word_vocab.get('OOV')), # token_ids
- Stack(), # seq_len
- Pad(axis=0, pad_val=label_vocab.get('O')) # label_ids
- ): fn(samples)
+ Pad(axis=0, pad_val=word_vocab.get('OOV')), # token_ids
+ Stack(), # seq_len
+ Pad(axis=0, pad_val=label_vocab.get('O')) # label_ids
+): fn(samples)
train_loader = paddle.io.DataLoader(
- dataset=train_ds,
- batch_size=32,
- shuffle=True,
- drop_last=True,
- return_list=True,
- collate_fn=batchify_fn)
+ dataset=train_ds,
+ batch_size=32,
+ shuffle=True,
+ drop_last=True,
+ return_list=True,
+ collate_fn=batchify_fn)
dev_loader = paddle.io.DataLoader(
- dataset=dev_ds,
- batch_size=32,
- drop_last=True,
- return_list=True,
- collate_fn=batchify_fn)
+ dataset=dev_ds,
+ batch_size=32,
+ drop_last=True,
+ return_list=True,
+ collate_fn=batchify_fn)
test_loader = paddle.io.DataLoader(
- dataset=test_ds,
- batch_size=32,
- drop_last=True,
- return_list=True,
- collate_fn=batchify_fn)
+ dataset=test_ds,
+ batch_size=32,
+ drop_last=True,
+ return_list=True,
+ collate_fn=batchify_fn)
+
class BiGRUWithCRF(nn.Layer):
def __init__(self,
@@ -110,26 +117,25 @@ def forward(self, x, lens):
_, pred = self.decoder(output, lens)
return output, lens, pred
+
# Define the model network and its loss
network = BiGRUWithCRF(300, 300, len(word_vocab), len(label_vocab))
model = paddle.Model(network)
-
-optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
+optimizer = paddle.optimizer.Adam(
+ learning_rate=0.001, parameters=model.parameters())
crf_loss = LinearChainCrfLoss(network.crf)
chunk_evaluator = ChunkEvaluator(label_list=label_vocab.keys(), suffix=True)
model.prepare(optimizer, crf_loss, chunk_evaluator)
-
model.fit(train_data=train_loader,
- eval_data=dev_loader,
- epochs=10,
- save_dir='./gru_results',
- log_freq=1)
+ eval_data=dev_loader,
+ epochs=10,
+ save_dir='./gru_results',
+ log_freq=1)
model.evaluate(eval_data=test_loader, log_freq=1)
outputs, lens, decodes = model.predict(test_data=test_loader)
preds = parse_decodes1(test_ds, decodes, lens, label_vocab)
print(len(preds))
print('\n'.join(preds[:5]))
-
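The body of `convert_tokens_to_ids` is mostly outside the hunk above; the obvious implementation, with a toy vocab for a quick sanity check (both are illustrative reconstructions, not the file's exact contents):

```python
# Toy vocab; the real word_vocab is loaded from ./conf/word.dic
word_vocab = {"OOV": 0, "北": 1, "京": 2}

def convert_tokens_to_ids(tokens, vocab, oov_token=None):
    """Map tokens to ids, falling back to the OOV id for unknown tokens."""
    oov_id = vocab.get(oov_token) if oov_token else None
    token_ids = []
    for token in tokens:
        token_ids.append(vocab.get(token, oov_id))
    return token_ids

print(convert_tokens_to_ids(["北", "京", "x"], word_vocab, "OOV"))  # [1, 2, 0]
```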
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_ernie.py b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_ernie.py
old mode 100644
new mode 100755
index 7f086dfaf..7413110ef
--- a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_ernie.py
+++ b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_ernie.py
@@ -11,6 +11,7 @@
from paddlenlp.transformers import ErnieTokenizer, ErnieForTokenClassification, ErnieGramTokenizer, ErnieGramForTokenClassification
from utils import convert_example
+
def load_dataset(datafiles):
def read(data_path):
with open(data_path, 'r', encoding='utf-8') as fp:
@@ -26,9 +27,10 @@ def read(data_path):
elif isinstance(datafiles, list) or isinstance(datafiles, tuple):
return [MapDataset(list(read(datafile))) for datafile in datafiles]
-train_ds, dev_ds, test_ds = load_dataset(datafiles=(
- './waybill_data/train.txt', './waybill_data/dev.txt', './waybill_data/test.txt'))
+train_ds, dev_ds, test_ds = load_dataset(datafiles=('./waybill_data/train.txt',
+ './waybill_data/dev.txt',
+ './waybill_data/test.txt'))
label_vocab = load_dict('./conf/tag.dic')
@@ -36,7 +38,8 @@ def read(data_path):
MODEL_NAME = "ernie-1.0"
tokenizer = ErnieTokenizer.from_pretrained(MODEL_NAME)
-trans_func = partial(convert_example, tokenizer=tokenizer, label_vocab=label_vocab)
+trans_func = partial(
+ convert_example, tokenizer=tokenizer, label_vocab=label_vocab)
train_ds.map(trans_func)
dev_ds.map(trans_func)
@@ -50,34 +53,27 @@ def read(data_path):
): fn(samples)
train_loader = paddle.io.DataLoader(
- dataset=train_ds,
- batch_size=200,
- return_list=True,
- collate_fn=batchify_fn)
+ dataset=train_ds, batch_size=200, return_list=True, collate_fn=batchify_fn)
dev_loader = paddle.io.DataLoader(
- dataset=dev_ds,
- batch_size=200,
- return_list=True,
- collate_fn=batchify_fn)
+ dataset=dev_ds, batch_size=200, return_list=True, collate_fn=batchify_fn)
test_loader = paddle.io.DataLoader(
- dataset=test_ds,
- batch_size=200,
- return_list=True,
- collate_fn=batchify_fn)
-
+ dataset=test_ds, batch_size=200, return_list=True, collate_fn=batchify_fn)
# Define the model network and its loss
-model = ErnieForTokenClassification.from_pretrained("ernie-1.0", num_classes=len(label_vocab))
+model = ErnieForTokenClassification.from_pretrained(
+ "ernie-1.0", num_classes=len(label_vocab))
metric = ChunkEvaluator(label_list=label_vocab.keys(), suffix=True)
loss_fn = paddle.nn.loss.CrossEntropyLoss(ignore_index=ignore_label)
-optimizer = paddle.optimizer.AdamW(learning_rate=2e-5, parameters=model.parameters())
+optimizer = paddle.optimizer.AdamW(
+ learning_rate=2e-5, parameters=model.parameters())
step = 0
for epoch in range(10):
# Switch the model to training mode
model.train()
- for idx, (input_ids, token_type_ids, length, labels) in enumerate(train_loader):
+ for idx, (input_ids, token_type_ids, length,
+ labels) in enumerate(train_loader):
logits = model(input_ids, token_type_ids)
loss = paddle.mean(loss_fn(logits, labels))
loss.backward()
@@ -87,8 +83,7 @@ def read(data_path):
print("epoch:%d - step:%d - loss: %f" % (epoch, step, loss))
evaluate(model, metric, dev_loader)
- paddle.save(model.state_dict(),
- './ernie_result/model_%d.pdparams' % step)
+ paddle.save(model.state_dict(), './ernie_result/model_%d.pdparams' % step)
# model.save_pretrained('./checkpoint')
# tokenizer.save_pretrained('./checkpoint')
@@ -101,7 +96,3 @@ def read(data_path):
"The results have been saved in the file: %s, some examples are shown below: "
% file_path)
print("\n".join(preds[:10]))
-
-
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_ernie_crf.py b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_ernie_crf.py
old mode 100644
new mode 100755
index c62b80e5a..08dbc2809
--- a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_ernie_crf.py
+++ b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_ernie_crf.py
@@ -33,7 +33,8 @@ def convert_to_features(example, tokenizer, label_vocab):
labels = ['O'] + labels + ['O']
tokenized_input['labels'] = [label_vocab[x] for x in labels]
return tokenized_input['input_ids'], tokenized_input[
- 'token_type_ids'], tokenized_input['seq_len'], tokenized_input['labels']
+ 'token_type_ids'], tokenized_input['seq_len'], tokenized_input[
+ 'labels']
@paddle.no_grad()
@@ -69,8 +70,9 @@ def predict(model, data_loader, ds, label_vocab):
paddle.set_device('gpu')
# Create dataset, tokenizer and dataloader.
- train_ds, dev_ds, test_ds = load_dataset(datafiles=(
- './waybill_data/train.txt', './waybill_data/dev.txt', './waybill_data/test.txt'))
+ train_ds, dev_ds, test_ds = load_dataset(
+ datafiles=('./waybill_data/train.txt', './waybill_data/dev.txt',
+ './waybill_data/test.txt'))
label_vocab = load_dict('./conf/tag.dic')
tokenizer = ErnieTokenizer.from_pretrained('ernie-1.0')
@@ -129,7 +131,7 @@ def predict(model, data_loader, ds, label_vocab):
evaluate(model, metric, dev_loader)
paddle.save(model.state_dict(),
- './ernie_crf_result/model_%d.pdparams' % step)
+ './ernie_crf_result/model_%d.pdparams' % step)
preds = predict(model, test_loader, test_ds, label_vocab)
file_path = "ernie_crf_results.txt"
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_erniegram.py b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_erniegram.py
old mode 100644
new mode 100755
index 41fbe80b5..fc7748871
--- a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_erniegram.py
+++ b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_erniegram.py
@@ -11,6 +11,7 @@
from paddlenlp.transformers import ErnieGramTokenizer, ErnieGramForTokenClassification
from utils import convert_example
+
def load_dataset(datafiles):
def read(data_path):
with open(data_path, 'r', encoding='utf-8') as fp:
@@ -26,10 +27,10 @@ def read(data_path):
elif isinstance(datafiles, list) or isinstance(datafiles, tuple):
return [MapDataset(list(read(datafile))) for datafile in datafiles]
-train_ds, dev_ds, test_ds = load_dataset(datafiles=(
- './waybill_data/train.txt', './waybill_data/dev.txt', './waybill_data/test.txt'))
-
+train_ds, dev_ds, test_ds = load_dataset(datafiles=('./waybill_data/train.txt',
+ './waybill_data/dev.txt',
+ './waybill_data/test.txt'))
label_vocab = load_dict('./conf/tag.dic')
@@ -37,7 +38,8 @@ def read(data_path):
MODEL_NAME = "ernie-gram-zh"
tokenizer = ErnieGramTokenizer.from_pretrained(MODEL_NAME)
-trans_func = partial(convert_example, tokenizer=tokenizer, label_vocab=label_vocab)
+trans_func = partial(
+ convert_example, tokenizer=tokenizer, label_vocab=label_vocab)
train_ds.map(trans_func)
dev_ds.map(trans_func)
@@ -51,34 +53,27 @@ def read(data_path):
): fn(samples)
train_loader = paddle.io.DataLoader(
- dataset=train_ds,
- batch_size=200,
- return_list=True,
- collate_fn=batchify_fn)
+ dataset=train_ds, batch_size=200, return_list=True, collate_fn=batchify_fn)
dev_loader = paddle.io.DataLoader(
- dataset=dev_ds,
- batch_size=200,
- return_list=True,
- collate_fn=batchify_fn)
+ dataset=dev_ds, batch_size=200, return_list=True, collate_fn=batchify_fn)
test_loader = paddle.io.DataLoader(
- dataset=test_ds,
- batch_size=200,
- return_list=True,
- collate_fn=batchify_fn)
-
+ dataset=test_ds, batch_size=200, return_list=True, collate_fn=batchify_fn)
# Define the model network and its loss
-model = ErnieGramForTokenClassification.from_pretrained("ernie-gram-zh", num_classes=len(label_vocab))
+model = ErnieGramForTokenClassification.from_pretrained(
+ "ernie-gram-zh", num_classes=len(label_vocab))
metric = ChunkEvaluator(label_list=label_vocab.keys(), suffix=True)
loss_fn = paddle.nn.loss.CrossEntropyLoss(ignore_index=ignore_label)
-optimizer = paddle.optimizer.AdamW(learning_rate=2e-5, parameters=model.parameters())
+optimizer = paddle.optimizer.AdamW(
+ learning_rate=2e-5, parameters=model.parameters())
step = 0
for epoch in range(10):
# Switch the model to training mode
model.train()
- for idx, (input_ids, token_type_ids, length, labels) in enumerate(train_loader):
+ for idx, (input_ids, token_type_ids, length,
+ labels) in enumerate(train_loader):
logits = model(input_ids, token_type_ids)
loss = paddle.mean(loss_fn(logits, labels))
loss.backward()
@@ -102,7 +97,3 @@ def read(data_path):
"The results have been saved in the file: %s, some examples are shown below: "
% file_path)
print("\n".join(preds[:10]))
-
-
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_erniegram_crf.py b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_erniegram_crf.py
old mode 100644
new mode 100755
index 74b2a5951..4262b3d89
--- a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_erniegram_crf.py
+++ b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/run_erniegram_crf.py
@@ -33,7 +33,8 @@ def convert_to_features(example, tokenizer, label_vocab):
labels = ['O'] + labels + ['O']
tokenized_input['labels'] = [label_vocab[x] for x in labels]
return tokenized_input['input_ids'], tokenized_input[
- 'token_type_ids'], tokenized_input['seq_len'], tokenized_input['labels']
+ 'token_type_ids'], tokenized_input['seq_len'], tokenized_input[
+ 'labels']
@paddle.no_grad()
@@ -69,8 +70,9 @@ def predict(model, data_loader, ds, label_vocab):
paddle.set_device('gpu')
# Create dataset, tokenizer and dataloader.
- train_ds, dev_ds, test_ds = load_dataset(datafiles=(
- './waybill_data/train.txt', './waybill_data/dev.txt', './waybill_data/test.txt'))
+ train_ds, dev_ds, test_ds = load_dataset(
+ datafiles=('./waybill_data/train.txt', './waybill_data/dev.txt',
+ './waybill_data/test.txt'))
label_vocab = load_dict('./conf/tag.dic')
tokenizer = ErnieGramTokenizer.from_pretrained('ernie-gram-zh')
@@ -129,7 +131,7 @@ def predict(model, data_loader, ds, label_vocab):
evaluate(model, metric, dev_loader)
paddle.save(model.state_dict(),
- './erniegram_crf_result/model_%d.pdparams' % step)
+ './erniegram_crf_result/model_%d.pdparams' % step)
preds = predict(model, test_loader, test_ds, label_vocab)
file_path = "ernie_crf_results.txt"
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/utils.py b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/utils.py
old mode 100644
new mode 100755
index 887db114a..6f9be9ae4
--- a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/utils.py
@@ -3,6 +3,7 @@
import paddle.nn.functional as F
from paddlenlp.data import Stack, Tuple, Pad
+
def load_dict(dict_path):
vocab = {}
i = 0
@@ -12,6 +13,7 @@ def load_dict(dict_path):
i += 1
return vocab
+
def convert_example(example, tokenizer, label_vocab):
tokens, labels = example
tokenized_input = tokenizer(
@@ -20,7 +22,9 @@ def convert_example(example, tokenizer, label_vocab):
labels = ['O'] + labels + ['O']
tokenized_input['labels'] = [label_vocab[x] for x in labels]
return tokenized_input['input_ids'], tokenized_input[
- 'token_type_ids'], tokenized_input['seq_len'], tokenized_input['labels']
+ 'token_type_ids'], tokenized_input['seq_len'], tokenized_input[
+ 'labels']
+
@paddle.no_grad()
def evaluate(model, metric, data_loader):
@@ -35,6 +39,7 @@ def evaluate(model, metric, data_loader):
print("eval precision: %f - recall: %f - f1: %f" %
(precision, recall, f1_score))
+
def predict(model, data_loader, ds, label_vocab):
pred_list = []
len_list = []
@@ -46,6 +51,7 @@ def predict(model, data_loader, ds, label_vocab):
preds = parse_decodes2(ds, pred_list, len_list, label_vocab)
return preds
+
def parse_decodes1(ds, decodes, lens, label_vocab):
decodes = [x for batch in decodes for x in batch]
lens = [x for batch in lens for x in batch]
@@ -72,6 +78,7 @@ def parse_decodes1(ds, decodes, lens, label_vocab):
[str((s, t)) for s, t in zip(sent_out, tags_out)]))
return outputs
+
def parse_decodes2(ds, decodes, lens, label_vocab):
decodes = [x for batch in decodes for x in batch]
lens = [x for batch in lens for x in batch]
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/waybill_data/dev.txt b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/waybill_data/dev.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/waybill_data/test.txt b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/waybill_data/test.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/waybill_data/train.txt b/Paddle_Industry_Practice_Sample_Library/Waybill_Information_Extraction/waybill_data/train.txt
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/README.md b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/data.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/data.py
old mode 100644
new mode 100755
index b9a44dd20..d8c947e8c
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/data.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/data.py
@@ -121,8 +121,8 @@ def data_prepare(path):
train_trigger, train_role = data_process(
os.path.join(path, "duee_train.json"))
dev_trigger, dev_role = data_process(os.path.join(path, "duee_dev.json"))
- test_trigger, test_role = data_process(os.path.join(
- path, "duee_test.json"))
+ test_trigger, test_role = data_process(
+ os.path.join(path, "duee_test.json"))
write_by_lines(os.path.join(trigger_path, "duee_train.tsv"), train_trigger)
write_by_lines(os.path.join(trigger_path, "duee_dev.tsv"), dev_trigger)
write_by_lines(os.path.join(trigger_path, "duee_test.tsv"), test_trigger)
@@ -135,22 +135,25 @@ def data_prepare(path):
def load_dict(dict_path):
tag2id, id2tag = {}, {}
with open(dict_path, "r", encoding="utf-8") as f:
- for idx, line in enumerate(f.readlines()):
- word = line.strip()
- id2tag[idx] = word
- tag2id[word] = idx
-
+ for idx, line in enumerate(f.readlines()):
+ word = line.strip()
+ id2tag[idx] = word
+ tag2id[word] = idx
+
return tag2id, id2tag
+
# load schema file
def load_schema(schema_path):
schema = {}
with open(schema_path, "r", encoding="utf-8") as f:
for line in f.readlines():
- event_des = json.loads(line)
- schema[event_des["event_type"]] = [r["role"] for r in event_des["role_list"]]
+ event_des = json.loads(line)
+ schema[event_des[
+ "event_type"]] = [r["role"] for r in event_des["role_list"]]
return schema
+
# load data from local file, which will be used for loading data with paddlenlp
def read(data_path):
with open(data_path, "r", encoding="utf-8") as f:
@@ -159,17 +162,28 @@ def read(data_path):
words, labels = line.strip().split("\t")
words = words.split("\002")
labels = labels.split("\002")
- yield {"tokens": words, "labels":labels}
+ yield {"tokens": words, "labels": labels}
-def convert_example_to_features(example, tokenizer, tag2id, max_seq_length=512, pad_default_tag="O", is_test=False):
-
- features = tokenizer(example["tokens"], is_split_into_words=True, max_seq_len=max_seq_length, return_length=True)
+def convert_example_to_features(example,
+ tokenizer,
+ tag2id,
+ max_seq_length=512,
+ pad_default_tag="O",
+ is_test=False):
+
+ features = tokenizer(
+ example["tokens"],
+ is_split_into_words=True,
+ max_seq_len=max_seq_length,
+ return_length=True)
if is_test:
- return features["input_ids"], features["token_type_ids"], features["seq_len"]
+ return features["input_ids"], features["token_type_ids"], features[
+ "seq_len"]
- tag_ids = [tag2id[tag] for tag in example["labels"][:(max_seq_length-2)]]
+ tag_ids = [tag2id[tag] for tag in example["labels"][:(max_seq_length - 2)]]
tag_ids = [tag2id[pad_default_tag]] + tag_ids + [tag2id[pad_default_tag]]
assert len(features["input_ids"]) == len(tag_ids)
-
- return features["input_ids"], features["token_type_ids"], features["seq_len"], tag_ids
\ No newline at end of file
+
+ return features["input_ids"], features["token_type_ids"], features[
+ "seq_len"], tag_ids
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/evaluate.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/evaluate.py
old mode 100644
new mode 100755
index 13c7c03c0..9c35f936d
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/evaluate.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/evaluate.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import argparse
import paddle
from functools import partial
@@ -25,9 +24,7 @@
from paddlenlp.data import Stack, Pad, Tuple
-
def evaluate(model, data_loader, metric):
-
model.eval()
metric.reset()
@@ -36,16 +33,13 @@ def evaluate(model, data_loader, metric):
logits = model(input_ids, token_type_ids)
preds = paddle.argmax(logits, axis=-1)
n_infer, n_label, n_correct = metric.compute(seq_lens, preds, tag_ids)
- metric.update(n_infer.numpy(), n_label.numpy(), n_correct.numpy())
+ metric.update(n_infer.numpy(), n_label.numpy(), n_correct.numpy())
precision, recall, f1_score = metric.accumulate()
-
- return precision, recall, f1_score
-
+ return precision, recall, f1_score
-
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default="trigger", help="The trigger or role model which you wanna evaluate")
@@ -61,7 +55,7 @@ def evaluate(model, data_loader, metric):
model_name = "ernie-1.0"
tag2id, id2tag = load_dict(args.tag_path)
dev_ds = load_dataset(read, data_path=args.dev_path, lazy=False)
-
+
tokenizer = ErnieTokenizer.from_pretrained(model_name)
trans_func = partial(convert_example_to_features, tokenizer=tokenizer, tag2id=tag2id, max_seq_length=256, pad_default_tag="O", is_test=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
@@ -84,11 +78,7 @@ def evaluate(model, data_loader, metric):
event_model.load_dict(loaded_state_dict)
metric = ChunkEvaluator(label_list=tag2id.keys(), suffix=False)
-
+
# evalute on dev data
precision, recall, f1_score = evaluate(event_model, dev_loader, metric)
print(f'{args.model_name} evaluation result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1_score:.5f}')
-
-
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/model.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/model.py
old mode 100644
new mode 100755
index ffbcfb1c8..9740bb430
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/model.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/model.py
@@ -12,28 +12,31 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import paddle.nn as nn
+
class ErnieForTokenClassification(paddle.nn.Layer):
def __init__(self, ernie, num_classes=2, dropout=None):
super(ErnieForTokenClassification, self).__init__()
self.num_classes = num_classes
self.ernie = ernie
- self.dropout = nn.Dropout(dropout if dropout is not None else self.ernie.config["hidden_dropout_prob"])
- self.classifier = nn.Linear(self.ernie.config["hidden_size"], num_classes)
-
-
- def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None):
- sequence_output, _ = self.ernie(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask)
+ self.dropout = nn.Dropout(dropout if dropout is not None else
+ self.ernie.config["hidden_dropout_prob"])
+ self.classifier = nn.Linear(self.ernie.config["hidden_size"],
+ num_classes)
+
+ def forward(self,
+ input_ids,
+ token_type_ids=None,
+ position_ids=None,
+ attention_mask=None):
+ sequence_output, _ = self.ernie(
+ input_ids,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ attention_mask=attention_mask)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
return logits
-
-
-
-
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/predict.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/predict.py
old mode 100644
new mode 100755
index 7bfadf23c..d8d7def99
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/predict.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import argparse
import paddle
from functools import partial
@@ -25,15 +24,21 @@
from paddlenlp.data import Stack, Pad, Tuple
from seqeval.metrics.sequence_labeling import get_entities
+
def format_print(events):
for idx, event in enumerate(events):
- print(f"event{idx} - event_type:{event['event_type']}, trigger:{event['trigger']}")
+ print(
+ f"event{idx} - event_type:{event['event_type']}, trigger:{event['trigger']}"
+ )
for argument in event["arguments"]:
- print(f"role_type:{argument['role']}, argument:{argument['argument']} ")
+ print(
+ f"role_type:{argument['role']}, argument:{argument['argument']} "
+ )
print()
-def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, schema):
+def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag,
+ schema):
trigger_model.eval()
role_model.eval()
@@ -43,18 +48,25 @@ def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, s
if input_text == "quit":
break
splited_input_text = list(input_text.strip())
- features = tokenizer(splited_input_text, is_split_into_words=True, max_seq_len=args.max_seq_len, return_length=True)
+ features = tokenizer(
+ splited_input_text,
+ is_split_into_words=True,
+ max_seq_len=args.max_seq_len,
+ return_length=True)
input_ids = paddle.to_tensor(features["input_ids"]).unsqueeze(0)
- token_type_ids = paddle.to_tensor(features["token_type_ids"]).unsqueeze(0)
+ token_type_ids = paddle.to_tensor(features[
+ "token_type_ids"]).unsqueeze(0)
seq_len = features["seq_len"]
-
+
trigger_logits = trigger_model(input_ids, token_type_ids)
- trigger_preds = paddle.argmax(trigger_logits, axis=-1).numpy()[0][1:(seq_len-1)]
+ trigger_preds = paddle.argmax(
+ trigger_logits, axis=-1).numpy()[0][1:(seq_len - 1)]
trigger_preds = [trigger_id2tag[idx] for idx in trigger_preds]
trigger_entities = get_entities(trigger_preds, suffix=False)
-
+
role_logits = role_model(input_ids, token_type_ids)
- role_preds = paddle.argmax(role_logits, axis=-1).numpy()[0][1:(seq_len-1)]
+ role_preds = paddle.argmax(
+ role_logits, axis=-1).numpy()[0][1:(seq_len - 1)]
role_preds = [role_id2tag[idx] for idx in role_preds]
role_entities = get_entities(role_preds, suffix=False)
@@ -65,26 +77,27 @@ def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, s
if event_type in visited:
continue
visited.add(event_type)
- events.append({"event_type":event_type, "trigger":"".join(splited_input_text[start:end+1]), "arguments":[]})
-
+ events.append({
+ "event_type": event_type,
+ "trigger": "".join(splited_input_text[start:end + 1]),
+ "arguments": []
+ })
+
for event in events:
role_list = schema[event["event_type"]]
for role_entity in role_entities:
role_type, start, end = role_entity
if role_type not in role_list:
continue
- event["arguments"].append({"role":role_type, "argument":"".join(splited_input_text[start:end+1])})
-
- format_print(events)
-
-
+ event["arguments"].append({
+ "role": role_type,
+ "argument": "".join(splited_input_text[start:end + 1])
+ })
+ format_print(events)
-
-
-
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--trigger_model_path", type=str, default=None, help="trigger model path that you saved")
@@ -93,13 +106,13 @@ def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, s
parser.add_argument("--role_tag_path", type=str, default=None, help="role dict path")
parser.add_argument("--schema_path", type=str, default=None, help="event schema path")
parser.add_argument("--max_seq_len", type=int, default=512, help="max seq length")
-
+
args = parser.parse_args()
# yapf: enable
# load schema
schema = load_schema(args.schema_path)
-
+
# load dict
model_name = "ernie-1.0"
trigger_tag2id, trigger_id2tag = load_dict(args.trigger_tag_path)
@@ -116,4 +129,3 @@ def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, s
# predict
predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, schema)
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/run_evaluate.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/run_evaluate.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/run_predict.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/run_predict.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/run_train.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/run_train.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/train.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/train.py
old mode 100644
new mode 100755
index 876e608ab..08224b31c
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/train.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import ast
import argparse
@@ -54,7 +53,6 @@
# yapf: enable
-
def train():
# set running envir
paddle.set_device(args.device)
@@ -68,31 +66,40 @@ def train():
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
-
model_name = "ernie-1.0"
-
+
# load and process data
tag2id, id2tag = load_dict(args.tag_path)
train_ds = load_dataset(read, data_path=args.train_path, lazy=False)
dev_ds = load_dataset(read, data_path=args.dev_path, lazy=False)
tokenizer = ErnieTokenizer.from_pretrained(model_name)
- trans_func = partial(convert_example_to_features, tokenizer=tokenizer, tag2id=tag2id, max_seq_length=args.max_seq_len, pad_default_tag="O", is_test=False)
+ trans_func = partial(
+ convert_example_to_features,
+ tokenizer=tokenizer,
+ tag2id=tag2id,
+ max_seq_length=args.max_seq_len,
+ pad_default_tag="O",
+ is_test=False)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
batchify_fn = lambda samples, fn=Tuple(
- Pad(axis=0, pad_val=tokenizer.pad_token_id), # input_ids
- Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
- Stack(), # seq len
- Pad(axis=0, pad_val=-1) # tag_ids
+ Pad(axis=0, pad_val=tokenizer.pad_token_id), # input_ids
+ Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
+ Stack(), # seq len
+ Pad(axis=0, pad_val=-1) # tag_ids
): fn(samples)
-
- train_batch_sampler = paddle.io.DistributedBatchSampler(train_ds, batch_size=args.batch_size, shuffle=True)
- dev_batch_sampler = paddle.io.DistributedBatchSampler(dev_ds, batch_size=args.batch_size, shuffle=False)
- train_loader = paddle.io.DataLoader(train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn)
- dev_loader = paddle.io.DataLoader(dev_ds, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn)
+
+ train_batch_sampler = paddle.io.DistributedBatchSampler(
+ train_ds, batch_size=args.batch_size, shuffle=True)
+ dev_batch_sampler = paddle.io.DistributedBatchSampler(
+ dev_ds, batch_size=args.batch_size, shuffle=False)
+ train_loader = paddle.io.DataLoader(
+ train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn)
+ dev_loader = paddle.io.DataLoader(
+ dev_ds, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn)
# configure model training
ernie = ErnieModel.from_pretrained(model_name)
@@ -100,42 +107,63 @@ def train():
event_model = paddle.DataParallel(event_model)
num_training_steps = len(train_loader) * args.num_epoch
- lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps, args.warmup_proportion)
- decay_params = [p.name for n, p in event_model.named_parameters() if not any(nd in n for nd in ["bias", "norm"])]
- optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler, parameters=event_model.parameters(), weight_decay=args.weight_decay, apply_decay_param_fun=lambda x: x in decay_params)
-
+ lr_scheduler = LinearDecayWithWarmup(
+ args.learning_rate, num_training_steps, args.warmup_proportion)
+ decay_params = [
+ p.name for n, p in event_model.named_parameters()
+ if not any(nd in n for nd in ["bias", "norm"])
+ ]
+ optimizer = paddle.optimizer.AdamW(
+ learning_rate=lr_scheduler,
+ parameters=event_model.parameters(),
+ weight_decay=args.weight_decay,
+ apply_decay_param_fun=lambda x: x in decay_params)
+
metric = ChunkEvaluator(label_list=tag2id.keys(), suffix=False)
-
+
# start to train event_model
- global_step, best_f1 = 0, 0.
+ global_step, best_f1 = 0, 0.
event_model.train()
- for epoch in range(1, args.num_epoch+1):
+ for epoch in range(1, args.num_epoch + 1):
for batch_data in train_loader:
input_ids, token_type_ids, seq_len, tag_ids = batch_data
# logits: [batch_size, seq_len, num_tags] --> [batch_size*seq_len, num_tags]
- logits = event_model(input_ids, token_type_ids).reshape([-1, len(tag2id)])
- loss = paddle.mean(F.cross_entropy(logits, tag_ids.reshape([-1]), ignore_index=-1))
-
+ logits = event_model(input_ids, token_type_ids).reshape(
+ [-1, len(tag2id)])
+ loss = paddle.mean(
+ F.cross_entropy(
+ logits, tag_ids.reshape([-1]), ignore_index=-1))
+
loss.backward()
lr_scheduler.step()
optimizer.step()
optimizer.clear_grad()
-
+
if global_step > 0 and global_step % args.log_step == 0 and rank == 0:
- print(f"{args.model_name} - epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}")
- if global_step > 0 and global_step % args.eval_step == 0 and rank == 0:
- precision, recall, f1_score = evaluate(event_model, dev_loader, metric)
+ print(
+ f"{args.model_name} - epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}"
+ )
+ if global_step > 0 and global_step % args.eval_step == 0 and rank == 0:
+ precision, recall, f1_score = evaluate(event_model, dev_loader,
+ metric)
event_model.train()
if f1_score > best_f1:
-                    print(f"best F1 performance has been updated: {best_f1:.5f} --> {f1_score:.5f}")
+ print(
+                        f"best F1 performance has been updated: {best_f1:.5f} --> {f1_score:.5f}"
+ )
best_f1 = f1_score
- paddle.save(event_model.state_dict(), f"{args.checkpoint}/{args.model_name}_best.pdparams")
-                    print(f'{args.model_name} evaluation result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1_score:.5f} current best {best_f1:.5f}')
+ paddle.save(
+ event_model.state_dict(),
+ f"{args.checkpoint}/{args.model_name}_best.pdparams")
+ print(
+                        f'{args.model_name} evaluation result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1_score:.5f} current best {best_f1:.5f}'
+ )
global_step += 1
if rank == 0:
- paddle.save(event_model.state_dict(), f"{args.checkpoint}/{args.model_name}_final.pdparams")
-
+ paddle.save(event_model.state_dict(),
+ f"{args.checkpoint}/{args.model_name}_final.pdparams")
+
-if __name__=="__main__":
+if __name__ == "__main__":
train()
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/utils.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/utils.py
old mode 100644
new mode 100755
index 577873daa..d9f6a110a
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/utils.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import random
import numpy as np
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/README.md b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/data.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/data.py
old mode 100644
new mode 100755
index 9097fc5fb..d8c947e8c
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/data.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/data.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import json
@@ -122,8 +121,8 @@ def data_prepare(path):
train_trigger, train_role = data_process(
os.path.join(path, "duee_train.json"))
dev_trigger, dev_role = data_process(os.path.join(path, "duee_dev.json"))
- test_trigger, test_role = data_process(os.path.join(
- path, "duee_test.json"))
+ test_trigger, test_role = data_process(
+ os.path.join(path, "duee_test.json"))
write_by_lines(os.path.join(trigger_path, "duee_train.tsv"), train_trigger)
write_by_lines(os.path.join(trigger_path, "duee_dev.tsv"), dev_trigger)
write_by_lines(os.path.join(trigger_path, "duee_test.tsv"), test_trigger)
@@ -136,22 +135,25 @@ def data_prepare(path):
def load_dict(dict_path):
tag2id, id2tag = {}, {}
with open(dict_path, "r", encoding="utf-8") as f:
- for idx, line in enumerate(f.readlines()):
- word = line.strip()
- id2tag[idx] = word
- tag2id[word] = idx
-
+ for idx, line in enumerate(f.readlines()):
+ word = line.strip()
+ id2tag[idx] = word
+ tag2id[word] = idx
+
return tag2id, id2tag
+
# load schema file
def load_schema(schema_path):
schema = {}
with open(schema_path, "r", encoding="utf-8") as f:
for line in f.readlines():
- event_des = json.loads(line)
- schema[event_des["event_type"]] = [r["role"] for r in event_des["role_list"]]
+ event_des = json.loads(line)
+ schema[event_des[
+ "event_type"]] = [r["role"] for r in event_des["role_list"]]
return schema
+
# load data from local file, which will be used for loading data with paddlenlp
def read(data_path):
with open(data_path, "r", encoding="utf-8") as f:
@@ -160,17 +162,28 @@ def read(data_path):
words, labels = line.strip().split("\t")
words = words.split("\002")
labels = labels.split("\002")
- yield {"tokens": words, "labels":labels}
+ yield {"tokens": words, "labels": labels}
+
-def convert_example_to_features(example, tokenizer, tag2id, max_seq_length=512, pad_default_tag="O", is_test=False):
-
- features = tokenizer(example["tokens"], is_split_into_words=True, max_seq_len=max_seq_length, return_length=True)
+def convert_example_to_features(example,
+ tokenizer,
+ tag2id,
+ max_seq_length=512,
+ pad_default_tag="O",
+ is_test=False):
+ features = tokenizer(
+ example["tokens"],
+ is_split_into_words=True,
+ max_seq_len=max_seq_length,
+ return_length=True)
if is_test:
- return features["input_ids"], features["token_type_ids"], features["seq_len"]
+ return features["input_ids"], features["token_type_ids"], features[
+ "seq_len"]
- tag_ids = [tag2id[tag] for tag in example["labels"][:(max_seq_length-2)]]
+ tag_ids = [tag2id[tag] for tag in example["labels"][:(max_seq_length - 2)]]
tag_ids = [tag2id[pad_default_tag]] + tag_ids + [tag2id[pad_default_tag]]
assert len(features["input_ids"]) == len(tag_ids)
-
- return features["input_ids"], features["token_type_ids"], features["seq_len"], tag_ids
+
+ return features["input_ids"], features["token_type_ids"], features[
+ "seq_len"], tag_ids
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/evaluate.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/evaluate.py
old mode 100644
new mode 100755
index 37fa0ccc9..60d408fb9
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/evaluate.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/evaluate.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import argparse
import paddle
from functools import partial
@@ -25,9 +24,7 @@
from paddlenlp.data import Stack, Pad, Tuple
-
def evaluate(model, data_loader, metric):
-
model.eval()
metric.reset()
@@ -36,17 +33,15 @@ def evaluate(model, data_loader, metric):
logits = model(input_ids, token_type_ids)
_, pred_paths = model.viterbi_decoder(logits, seq_lens)
#preds = paddle.argmax(logits, axis=-1)
- n_infer, n_label, n_correct = metric.compute(seq_lens, pred_paths, tag_ids)
- metric.update(n_infer.numpy(), n_label.numpy(), n_correct.numpy())
+ n_infer, n_label, n_correct = metric.compute(seq_lens, pred_paths,
+ tag_ids)
+ metric.update(n_infer.numpy(), n_label.numpy(), n_correct.numpy())
precision, recall, f1_score = metric.accumulate()
-
- return precision, recall, f1_score
-
+ return precision, recall, f1_score
-
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default="trigger", help="The trigger or role model which you wanna evaluate")
@@ -62,7 +57,7 @@ def evaluate(model, data_loader, metric):
model_name = "ernie-1.0"
tag2id, id2tag = load_dict(args.tag_path)
dev_ds = load_dataset(read, data_path=args.dev_path, lazy=False)
-
+
tokenizer = ErnieTokenizer.from_pretrained(model_name)
trans_func = partial(convert_example_to_features, tokenizer=tokenizer, tag2id=tag2id, max_seq_length=256, pad_default_tag="O", is_test=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
@@ -85,11 +80,7 @@ def evaluate(model, data_loader, metric):
event_model.load_dict(loaded_state_dict)
metric = ChunkEvaluator(label_list=tag2id.keys(), suffix=False)
-
+
# evaluate on dev data
precision, recall, f1_score = evaluate(event_model, dev_loader, metric)
print(f'{args.model_name} evaluation result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1_score:.5f}')
-
-
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/model.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/model.py
old mode 100644
new mode 100755
index 43b72b76c..ea7609fbd
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/model.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/model.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import paddle.nn as nn
import paddle_crf as crf
@@ -23,23 +22,32 @@ def __init__(self, ernie, num_classes=2, dropout=None):
super(EventExtractionModel, self).__init__()
self.num_classes = num_classes
self.ernie = ernie
- self.dropout = nn.Dropout(dropout if dropout is not None else self.ernie.config["hidden_dropout_prob"])
- self.classifier = nn.Linear(self.ernie.config["hidden_size"], num_classes+2) # add start and stop tag
-
- self.crf = crf.LinearChainCrf(num_classes, crf_lr=0.001, with_start_stop_tag=True)
+ self.dropout = nn.Dropout(dropout if dropout is not None else
+ self.ernie.config["hidden_dropout_prob"])
+ self.classifier = nn.Linear(self.ernie.config["hidden_size"],
+ num_classes + 2) # add start and stop tag
+
+ self.crf = crf.LinearChainCrf(
+ num_classes, crf_lr=0.001, with_start_stop_tag=True)
self.crf_loss = crf.LinearChainCrfLoss(self.crf)
self.viterbi_decoder = crf.ViterbiDecoder(self.crf.transitions)
- def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None):
- sequence_output, _ = self.ernie(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask)
+ def forward(self,
+ input_ids,
+ token_type_ids=None,
+ position_ids=None,
+ attention_mask=None):
+ sequence_output, _ = self.ernie(
+ input_ids,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ attention_mask=attention_mask)
sequence_output = self.dropout(sequence_output)
emissions = self.classifier(sequence_output)
return emissions
-
+
def get_crf_loss(self, emissions, lens, tags):
loss = self.crf_loss(emissions, lens, tags)
loss = paddle.mean(loss)
return loss
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/paddle_crf.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/paddle_crf.py
old mode 100644
new mode 100755
index 52cd9985d..2041e5e9d
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/paddle_crf.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/paddle_crf.py
@@ -145,8 +145,9 @@ def _point_score(self, inputs, labels, lengths):
offsets += paddle.unsqueeze(self._get_seq_index(seq_len) * n_labels, 0)
flattened_tag_indices = paddle.reshape(offsets + labels, [-1])
- scores = paddle.gather(flattened_inputs, flattened_tag_indices).reshape(
- [batch_size, seq_len])
+ scores = paddle.gather(flattened_inputs,
+ flattened_tag_indices).reshape(
+ [batch_size, seq_len])
mask = paddle.cast(
sequence_mask(
@@ -351,11 +352,11 @@ def forward(self, inputs, lengths):
lengths_np = lengths.numpy()
batch_path = []
max_len = 0
-
+
for batch_id in range(batch_size):
best_last_tag = last_ids[batch_id]
path = [best_last_tag]
- for hist in reversed(historys[:(lengths_np[batch_id]-1)]):
+ for hist in reversed(historys[:(lengths_np[batch_id] - 1)]):
best_last_tag = hist[batch_id][best_last_tag]
path.append(best_last_tag)
@@ -364,7 +365,9 @@ def forward(self, inputs, lengths):
# Pad to the max sequence length, so that the ChunkEvaluator can compute it
batch_path.append(path)
# padding to the same length
- batch_path = [path + [0] * (max_len - len(path)) for path in batch_path]
+ batch_path = [
+ path + [0] * (max_len - len(path)) for path in batch_path
+ ]
batch_path = paddle.to_tensor(batch_path)
return scores, batch_path
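# Illustrative aside: the Viterbi decoder above rebuilds each best path by
# walking `historys` (the per-step argmax back-pointers) backwards from the
# best final tag. A plain-Python sketch of that backtrace for a single
# 3-step sequence; the data is invented, not repo code:
last_id = 2                 # best tag at the final step
historys = [[0, 0, 1],      # historys[t][tag] = best previous tag
            [1, 1, 0]]      # two transitions for a 3-step sequence

path = [last_id]
for hist in reversed(historys):
    last_id = hist[last_id]
    path.append(last_id)
path.reverse()
print(path)  # [0, 0, 2]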
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/predict.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/predict.py
old mode 100644
new mode 100755
index 6f1ed1c70..d7b70bd11
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/predict.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import argparse
import paddle
from functools import partial
@@ -25,15 +24,21 @@
from paddlenlp.data import Stack, Pad, Tuple
from seqeval.metrics.sequence_labeling import get_entities
+
def format_print(events):
for idx, event in enumerate(events):
- print(f"event{idx} - event_type:{event['event_type']}, trigger:{event['trigger']}")
+ print(
+ f"event{idx} - event_type:{event['event_type']}, trigger:{event['trigger']}"
+ )
for argument in event["arguments"]:
- print(f"role_type:{argument['role']}, argument:{argument['argument']} ")
+ print(
+ f"role_type:{argument['role']}, argument:{argument['argument']} "
+ )
print()
-def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, schema):
+def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag,
+ schema):
trigger_model.eval()
role_model.eval()
@@ -43,20 +48,27 @@ def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, s
if input_text == "quit":
break
splited_input_text = list(input_text.strip())
- features = tokenizer(splited_input_text, is_split_into_words=True, max_seq_len=args.max_seq_len, return_length=True)
+ features = tokenizer(
+ splited_input_text,
+ is_split_into_words=True,
+ max_seq_len=args.max_seq_len,
+ return_length=True)
input_ids = paddle.to_tensor(features["input_ids"]).unsqueeze(0)
- token_type_ids = paddle.to_tensor(features["token_type_ids"]).unsqueeze(0)
+ token_type_ids = paddle.to_tensor(features[
+ "token_type_ids"]).unsqueeze(0)
seq_len = features["seq_len"]
-
+
trigger_logits = trigger_model(input_ids, token_type_ids)
- _, trigger_preds = trigger_model.viterbi_decoder(trigger_logits, paddle.to_tensor([seq_len]))
- trigger_preds = trigger_preds.numpy()[0][1:(seq_len-1)]
+ _, trigger_preds = trigger_model.viterbi_decoder(
+ trigger_logits, paddle.to_tensor([seq_len]))
+ trigger_preds = trigger_preds.numpy()[0][1:(seq_len - 1)]
trigger_preds = [trigger_id2tag[idx] for idx in trigger_preds]
trigger_entities = get_entities(trigger_preds, suffix=False)
-
+
role_logits = role_model(input_ids, token_type_ids)
- _, role_preds = role_model.viterbi_decoder(role_logits, paddle.to_tensor([seq_len]))
- role_preds = role_preds.numpy()[0][1:(seq_len-1)]
+ _, role_preds = role_model.viterbi_decoder(role_logits,
+ paddle.to_tensor([seq_len]))
+ role_preds = role_preds.numpy()[0][1:(seq_len - 1)]
role_preds = [role_id2tag[idx] for idx in role_preds]
role_entities = get_entities(role_preds, suffix=False)
@@ -67,26 +79,27 @@ def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, s
if event_type in visited:
continue
visited.add(event_type)
- events.append({"event_type":event_type, "trigger":"".join(splited_input_text[start:end+1]), "arguments":[]})
-
+ events.append({
+ "event_type": event_type,
+ "trigger": "".join(splited_input_text[start:end + 1]),
+ "arguments": []
+ })
+
for event in events:
role_list = schema[event["event_type"]]
for role_entity in role_entities:
role_type, start, end = role_entity
if role_type not in role_list:
continue
- event["arguments"].append({"role":role_type, "argument":"".join(splited_input_text[start:end+1])})
-
- format_print(events)
-
-
+ event["arguments"].append({
+ "role": role_type,
+ "argument": "".join(splited_input_text[start:end + 1])
+ })
+ format_print(events)
-
-
-
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--trigger_model_path", type=str, default=None, help="trigger model path that you saved")
@@ -95,13 +108,13 @@ def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, s
parser.add_argument("--role_tag_path", type=str, default=None, help="role dict path")
parser.add_argument("--schema_path", type=str, default=None, help="event schema path")
parser.add_argument("--max_seq_len", type=int, default=512, help="max seq length")
-
+
args = parser.parse_args()
# yapf: enable
# load schema
schema = load_schema(args.schema_path)
-
+
# load dict
model_name = "ernie-1.0"
trigger_tag2id, trigger_id2tag = load_dict(args.trigger_tag_path)
@@ -118,4 +131,3 @@ def predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, s
# predict
predict(trigger_model, role_model, tokenizer, trigger_id2tag, role_id2tag, schema)
-
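# Illustrative aside: get_entities (imported from seqeval above) turns a BIO
# tag sequence into (type, start, end) spans, which predict() joins back
# into surface text via "".join(splited_input_text[start:end + 1]). A
# minimal re-implementation for intuition only; the real function supports
# more tagging schemes:
def bio_spans(tags):
    spans, start, etype = [], None, None
    for i, tag in enumerate(tags + ["O"]):  # sentinel flushes the last span
        if start is not None and not tag.startswith("I-"):
            spans.append((etype, start, i - 1))
            start = None
        if tag.startswith("B-"):
            etype, start = tag[2:], i
    return spans

print(bio_spans(["B-TRIG", "I-TRIG", "O", "B-TRIG"]))  # [('TRIG', 0, 1), ('TRIG', 3, 3)]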
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/run_evaluate.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/run_evaluate.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/run_predict.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/run_predict.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/run_train.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/run_train.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/train.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/train.py
old mode 100644
new mode 100755
index 742c6efce..821b97cb0
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/train.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import ast
import argparse
@@ -54,7 +53,6 @@
# yapf: enable
-
def train():
# set running envir
paddle.set_device(args.device)
@@ -68,74 +66,101 @@ def train():
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
-
model_name = "ernie-1.0"
-
+
# load and process data
tag2id, id2tag = load_dict(args.tag_path)
train_ds = load_dataset(read, data_path=args.train_path, lazy=False)
dev_ds = load_dataset(read, data_path=args.dev_path, lazy=False)
tokenizer = ErnieTokenizer.from_pretrained(model_name)
- trans_func = partial(convert_example_to_features, tokenizer=tokenizer, tag2id=tag2id, max_seq_length=args.max_seq_len, pad_default_tag="O", is_test=False)
+ trans_func = partial(
+ convert_example_to_features,
+ tokenizer=tokenizer,
+ tag2id=tag2id,
+ max_seq_length=args.max_seq_len,
+ pad_default_tag="O",
+ is_test=False)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
batchify_fn = lambda samples, fn=Tuple(
- Pad(axis=0, pad_val=tokenizer.pad_token_id), # input_ids
- Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
- Stack(), # seq len
- Pad(axis=0, pad_val=-1) # tag_ids
+ Pad(axis=0, pad_val=tokenizer.pad_token_id), # input_ids
+ Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
+ Stack(), # seq len
+ Pad(axis=0, pad_val=-1) # tag_ids
): fn(samples)
-
- train_batch_sampler = paddle.io.DistributedBatchSampler(train_ds, batch_size=args.batch_size, shuffle=True)
- dev_batch_sampler = paddle.io.DistributedBatchSampler(dev_ds, batch_size=args.batch_size, shuffle=False)
- train_loader = paddle.io.DataLoader(train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn)
- dev_loader = paddle.io.DataLoader(dev_ds, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn)
+
+ train_batch_sampler = paddle.io.DistributedBatchSampler(
+ train_ds, batch_size=args.batch_size, shuffle=True)
+ dev_batch_sampler = paddle.io.DistributedBatchSampler(
+ dev_ds, batch_size=args.batch_size, shuffle=False)
+ train_loader = paddle.io.DataLoader(
+ train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn)
+ dev_loader = paddle.io.DataLoader(
+ dev_ds, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn)
# configure model training
ernie = ErnieModel.from_pretrained(model_name)
event_model = EventExtractionModel(ernie, num_classes=len(tag2id))
- set_seed(args.seed)
+ set_seed(args.seed)
num_training_steps = len(train_loader) * args.num_epoch
- lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps, args.warmup_proportion)
- decay_params = [p.name for n, p in event_model.named_parameters() if not any(nd in n for nd in ["bias", "norm"])]
- optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler, parameters=event_model.parameters(), weight_decay=args.weight_decay, apply_decay_param_fun=lambda x: x in decay_params)
-
+ lr_scheduler = LinearDecayWithWarmup(
+ args.learning_rate, num_training_steps, args.warmup_proportion)
+ decay_params = [
+ p.name for n, p in event_model.named_parameters()
+ if not any(nd in n for nd in ["bias", "norm"])
+ ]
+ optimizer = paddle.optimizer.AdamW(
+ learning_rate=lr_scheduler,
+ parameters=event_model.parameters(),
+ weight_decay=args.weight_decay,
+ apply_decay_param_fun=lambda x: x in decay_params)
+
metric = ChunkEvaluator(label_list=tag2id.keys(), suffix=False)
-
+
# start to train event_model
- global_step, best_f1 = 0, 0.
+ global_step, best_f1 = 0, 0.
event_model.train()
- for epoch in range(1, args.num_epoch+1):
+ for epoch in range(1, args.num_epoch + 1):
for batch_data in train_loader:
input_ids, token_type_ids, seq_lens, tag_ids = batch_data
logits = event_model(input_ids, token_type_ids)
- loss = event_model.get_crf_loss(logits, seq_lens, tag_ids)
+ loss = event_model.get_crf_loss(logits, seq_lens, tag_ids)
loss.backward()
lr_scheduler.step()
optimizer.step()
optimizer.clear_grad()
-
+
if global_step > 0 and global_step % args.log_step == 0 and rank == 0:
- print(f"{args.model_name} - epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}")
- if global_step > 0 and global_step % args.eval_step == 0 and rank == 0:
- precision, recall, f1_score = evaluate(event_model, dev_loader, metric)
+ print(
+ f"{args.model_name} - epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}"
+ )
+ if global_step > 0 and global_step % args.eval_step == 0 and rank == 0:
+ precision, recall, f1_score = evaluate(event_model, dev_loader,
+ metric)
event_model.train()
if f1_score > best_f1:
- print(f"best F1 performence has been updated: {best_f1:.5f} --> {f1_score:.5f}")
+ print(
+ f"best F1 performence has been updated: {best_f1:.5f} --> {f1_score:.5f}"
+ )
best_f1 = f1_score
- paddle.save(event_model.state_dict(), f"{args.checkpoint}/{args.model_name}_best.pdparams")
- print(f'{args.model_name} evaluation result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1_score:.5f} current best {best_f1:.5f}')
+ paddle.save(
+ event_model.state_dict(),
+ f"{args.checkpoint}/{args.model_name}_best.pdparams")
+ print(
+ f'{args.model_name} evaluation result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1_score:.5f} current best {best_f1:.5f}'
+ )
global_step += 1
if rank == 0:
- paddle.save(event_model.state_dict(), f"{args.checkpoint}/{args.model_name}_final.pdparams")
-
+ paddle.save(event_model.state_dict(),
+ f"{args.checkpoint}/{args.model_name}_final.pdparams")
+
-if __name__=="__main__":
+if __name__ == "__main__":
train()
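# Illustrative aside: what the CRF head changes relative to the plain ERNIE
# trainer -- get_crf_loss scores whole tag sequences, adding learned
# tag-to-tag transition scores to the per-token emissions. A toy
# unnormalised path score (the LinearChainCrfLoss additionally normalises
# by a log-partition term, omitted here); all numbers are invented:
emissions = [[1.0, 0.2], [0.3, 2.0], [0.5, 0.1]]  # [seq_len, num_tags]
trans = [[0.5, -1.0], [0.2, 0.8]]                 # trans[prev][cur]

def path_score(tags):
    score = emissions[0][tags[0]]
    for t in range(1, len(tags)):
        score += trans[tags[t - 1]][tags[t]] + emissions[t][tags[t]]
    return score

print(path_score([0, 1, 0]))  # 1.0 + (-1.0 + 2.0) + (0.2 + 0.5) = 2.7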
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/utils.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/utils.py
old mode 100644
new mode 100755
index 577873daa..d9f6a110a
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie_with_crf/utils.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import random
import numpy as np
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/README.md b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/config.yaml b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/config.yaml
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/data.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/data.py
old mode 100644
new mode 100755
index dc4757267..c55d98c65
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/data.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/data.py
@@ -12,13 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import json
from collections import OrderedDict
from paddle.io import Dataset
import numpy as np
+
class ATISDataset(Dataset):
def __init__(self, path, vocab_path, intent_path, slot_path):
self.examples = self.load_data(path)
@@ -26,11 +26,10 @@ def __init__(self, path, vocab_path, intent_path, slot_path):
self.intent2id, self.id2intent = self.load_dict(intent_path)
self.slot2id, self.id2slot = self.load_dict(slot_path)
-
def __getitem__(self, idx):
example = self.examples[idx]
tokens, tags, intent = self.convert_example_to_id(example)
-
+
return np.array(tokens), np.array(tags), intent, len(tokens)
def __len__(self):
@@ -48,7 +47,6 @@ def num_intents(self):
def num_slots(self):
return len(self.slot2id)
-
def convert_example_to_id(self, example):
tokens = example["text"].split()
tags = example["tag"].split()
@@ -64,12 +62,10 @@ def load_dict(self, dict_path):
with open(dict_path, "r", encoding="utf-8") as f:
words = [word.strip() for word in f.readlines()]
dict2id = dict(zip(words, range(len(words))))
- id2dict = {v:k for k,v in dict2id.items()}
+ id2dict = {v: k for k, v in dict2id.items()}
return dict2id, id2dict
-
-
def _split_with_id(self, text, start=0):
word2sid = OrderedDict()
word = ""
@@ -80,9 +76,11 @@ def _split_with_id(self, text, start=0):
else:
word += text[i]
- if (i < len(text) - 1 and text[i + 1] == " ") or i == len(text) - 1:
+ if (i < len(text) - 1 and
+ text[i + 1] == " ") or i == len(text) - 1:
# get whole word
- key = str(i - len(word) + 1 + start) + "_" + str(i + start) + "_" + word
+ key = str(i - len(word) + 1 + start) + "_" + str(
+ i + start) + "_" + word
word2sid[key] = count
count += 1
word = ""
@@ -103,7 +101,8 @@ def load_data(self, path):
tags = ['O'] * len(splited_text)
word2sid = self._split_with_id(raw_example["text"])
for entity in raw_example["entities"]:
- start, end, value, entity_name = entity["start"], entity["end"] - 1, entity["value"], entity["entity"]
+ start, end, value, entity_name = entity["start"], entity[
+ "end"] - 1, entity["value"], entity["entity"]
entity2sid = self._split_with_id(value, start=start)
for i, word in enumerate(entity2sid.keys()):
if i == 0:
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/evaluate.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/evaluate.py
old mode 100644
new mode 100755
index 0a9619a66..6162aa837
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/evaluate.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/evaluate.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import yaml
import paddle
@@ -24,6 +23,7 @@
parser = argparse.ArgumentParser(description="processing input params.")
+
def collate_fn(batch, token_pad_val=0, tag_pad_val=0):
token_list, tag_list, intent_list, len_list = [], [], [], []
for tokens, tags, intent, len_ in batch:
@@ -35,30 +35,45 @@ def collate_fn(batch, token_pad_val=0, tag_pad_val=0):
# padding sequence
max_len = max(map(len, token_list))
for i in range(len(token_list)):
- token_list[i] = token_list[i] + [token_pad_val] * (max_len-len(token_list[i]))
- tag_list[i] = tag_list[i] + [tag_pad_val] * (max_len - len(tag_list[i]))
+ token_list[i] = token_list[i] + [token_pad_val] * (max_len -
+ len(token_list[i]))
+ tag_list[i] = tag_list[i] + [tag_pad_val] * (max_len - len(tag_list[i])
+ )
+
- return paddle.to_tensor(token_list), paddle.to_tensor(tag_list), paddle.to_tensor(intent_list), paddle.to_tensor(len_list)
+ return paddle.to_tensor(token_list), paddle.to_tensor(
+ tag_list), paddle.to_tensor(intent_list), paddle.to_tensor(len_list)
def evaluate(jointModel=None, test_set=None, args=None):
jointModel.eval()
- test_loader = DataLoader(test_set, batch_size=args["batch_size"], shuffle=False, drop_last=False, collate_fn=collate_fn)
+ test_loader = DataLoader(
+ test_set,
+ batch_size=args["batch_size"],
+ shuffle=False,
+ drop_last=False,
+ collate_fn=collate_fn)
slot_metric = SeqEntityScore(test_set.id2slot)
intent_metric = SingleClassificationScore(test_set.id2intent)
-
+
for step, batch in enumerate(test_loader()):
batch_tokens, batch_tags, batch_intents, batch_lens = batch
emissions, intent_logits = jointModel(batch_tokens, batch_lens)
_, pred_paths = jointModel.viterbi_decoder(emissions, batch_lens)
-
+
pred_paths = pred_paths.numpy().tolist()
- pred_paths = [tag_seq[:tag_len] for tag_seq, tag_len in zip(pred_paths, batch_lens)]
-
+ pred_paths = [
+ tag_seq[:tag_len]
+ for tag_seq, tag_len in zip(pred_paths, batch_lens)
+ ]
+
batch_tags = batch_tags.numpy().tolist()
- real_paths = [tag_seq[:tag_len] for tag_seq, tag_len in zip(batch_tags, batch_lens)]
+ real_paths = [
+ tag_seq[:tag_len]
+ for tag_seq, tag_len in zip(batch_tags, batch_lens)
+ ]
slot_metric.update(pred_paths=pred_paths, real_paths=real_paths)
-
+
pred_intents = paddle.argmax(intent_logits, axis=1)
intent_metric.update(pred_intents, batch_intents)
@@ -70,8 +85,12 @@ def evaluate(jointModel=None, test_set=None, args=None):
print("\n")
-if __name__=="__main__":
- parser.add_argument("--model_path", type=str, default="", help="the path of the saved model that you would like to verify")
+if __name__ == "__main__":
+ parser.add_argument(
+ "--model_path",
+ type=str,
+ default="",
+ help="the path of the saved model that you would like to verify")
model_path = parser.parse_args().model_path
# configuring model training
@@ -79,14 +98,22 @@ def evaluate(jointModel=None, test_set=None, args=None):
args = yaml.load(f.read())
# loading testset
- test_set = ATISDataset(args["test_path"], args["vocab_path"], args["intent_path"], args["slot_path"])
+ test_set = ATISDataset(args["test_path"], args["vocab_path"],
+ args["intent_path"], args["slot_path"])
args["vocab_size"] = test_set.vocab_size
args["num_intents"] = test_set.num_intents
args["num_slots"] = test_set.num_slots
# loading model
loaded_state_dict = paddle.load(model_path)
- jointModel = JointModel(args["vocab_size"], args["embedding_size"], args["lstm_hidden_size"], args["num_intents"], args["num_slots"], num_layers=args["lstm_layers"], drop_p=args["dropout_rate"])
+ jointModel = JointModel(
+ args["vocab_size"],
+ args["embedding_size"],
+ args["lstm_hidden_size"],
+ args["num_intents"],
+ args["num_slots"],
+ num_layers=args["lstm_layers"],
+ drop_p=args["dropout_rate"])
jointModel.load_dict(loaded_state_dict)
# evaluate model
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/metric.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/metric.py
old mode 100644
new mode 100755
index c50cda0aa..a7949975e
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/metric.py
@@ -12,22 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
from collections import Counter
import numpy as np
import paddle
+
class SeqEntityScore(object):
def __init__(self, id2tag):
self.id2tag = id2tag
self.real_entities = []
self.pred_entities = []
self.correct_entities = []
-
+
def compute(self, real_count, pred_count, correct_count):
recall = 0 if real_count == 0 else (correct_count / real_count)
precision = 0 if pred_count == 0 else (correct_count / pred_count)
- f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall)
+ f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (
+ precision + recall)
return recall, precision, f1
def get_result(self):
@@ -39,16 +40,26 @@ def get_result(self):
real_count = count
pred_count = pred_counter.get(label, 0)
correct_count = correct_counter.get(label, 0)
- recall, precision, f1 = self.compute(real_count, pred_count, correct_count)
- result[label] = {"Precision": round(precision, 4), 'Recall': round(recall, 4), 'F1': round(f1, 4)}
+ recall, precision, f1 = self.compute(real_count, pred_count,
+ correct_count)
+ result[label] = {
+ "Precision": round(precision, 4),
+ 'Recall': round(recall, 4),
+ 'F1': round(f1, 4)
+ }
real_total_count = len(self.real_entities)
pred_total_count = len(self.pred_entities)
correct_total_count = len(self.correct_entities)
- recall, precision, f1 = self.compute(real_total_count, pred_total_count, correct_total_count)
- result["Total"] = {"Precision": round(precision, 4), 'Recall': round(recall, 4), 'F1': round(f1, 4)}
+ recall, precision, f1 = self.compute(
+ real_total_count, pred_total_count, correct_total_count)
+ result["Total"] = {
+ "Precision": round(precision, 4),
+ 'Recall': round(recall, 4),
+ 'F1': round(f1, 4)
+ }
return result
-
+
def get_entities_bios(self, seq):
entities = []
entity = [-1, -1, -1]
@@ -116,20 +127,25 @@ def get_entities_bio(self, seq):
return entities
def update(self, real_paths, pred_paths):
-
+
for real_path, pred_path in zip(real_paths, pred_paths):
real_ents = self.get_entities_bio(real_path)
pred_ents = self.get_entities_bio(pred_path)
self.real_entities.extend(real_ents)
self.pred_entities.extend(pred_ents)
- self.correct_entities.extend([pred_ent for pred_ent in pred_ents if pred_ent in real_ents])
+ self.correct_entities.extend(
+ [pred_ent for pred_ent in pred_ents if pred_ent in real_ents])
def format_print(self, result, print_detail=False):
def print_item(entity, metric):
if entity != "Total":
- print(f"Entity: {entity} - Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}")
+ print(
+ f"Entity: {entity} - Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}"
+ )
else:
- print(f"Total: Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}")
+ print(
+ f"Total: Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}"
+ )
print_item("Total", result["Total"])
if print_detail:
@@ -145,9 +161,8 @@ def __init__(self, id2label):
self.id2label = id2label
self.all_pred_labels = []
self.all_real_labels = []
- self.all_correct_labels = []
-
-
+ self.all_correct_labels = []
+
def update(self, pred_labels, real_labels):
if not isinstance(pred_labels, list):
pred_labels = pred_labels.numpy().tolist()
@@ -159,12 +174,17 @@ def update(self, pred_labels, real_labels):
self.all_pred_labels.extend(pred_labels)
self.all_real_labels.extend(real_labels)
- self.all_correct_labels.extend([pred_label for pred_label, real_label in zip(pred_labels, real_labels) if pred_label==real_label])
-
- def compute(self, pred_count , real_count, correct_count):
- recall = 0. if real_count == 0 else (correct_count / real_count)
+ self.all_correct_labels.extend([
+ pred_label
+ for pred_label, real_label in zip(pred_labels, real_labels)
+ if pred_label == real_label
+ ])
+
+ def compute(self, pred_count, real_count, correct_count):
+ recall = 0. if real_count == 0 else (correct_count / real_count)
precision = 0. if pred_count == 0 else (correct_count / pred_count)
- f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall)
+ f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (
+ precision + recall)
return precision, recall, f1
def get_result(self):
@@ -176,17 +196,25 @@ def get_result(self):
real_count = count
pred_count = pred_counter[label]
correct_count = correct_counter[label]
- precision, recall, f1 = self.compute(pred_count, real_count, correct_count)
- result[label] = {"Precision": round(precision, 4), 'Recall': round(recall, 4), 'F1': round(f1, 4)}
- total_acc = (np.array(self.all_pred_labels) == np.array(self.all_real_labels)).mean()
+ precision, recall, f1 = self.compute(pred_count, real_count,
+ correct_count)
+ result[label] = {
+ "Precision": round(precision, 4),
+ 'Recall': round(recall, 4),
+ 'F1': round(f1, 4)
+ }
+ total_acc = (np.array(self.all_pred_labels) ==
+ np.array(self.all_real_labels)).mean()
result["Total"] = {"Accuracy": total_acc}
- return result
+ return result
def format_print(self, result, print_detail=False):
def print_item(entity, metric):
if entity != "Total":
- print(f"Entity: {entity} - Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}")
+ print(
+ f"Entity: {entity} - Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}"
+ )
else:
print(f"Intent Accuracy: {metric['Accuracy']}")
@@ -197,5 +225,3 @@ def print_item(entity, metric):
continue
print_item(key, result[key])
print("\n")
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/model.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/model.py
old mode 100644
new mode 100755
index d8cc710db..e19b870e8
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/model.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/model.py
@@ -12,15 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import paddle.nn as nn
import paddle_crf as crf
import paddle.nn.functional as F
+
class JointModel(paddle.nn.Layer):
-
- def __init__(self, vocab_size, embedding_size, hidden_size, num_intents, num_slots, num_layers=1, drop_p=0.1):
+ def __init__(self,
+ vocab_size,
+ embedding_size,
+ hidden_size,
+ num_intents,
+ num_slots,
+ num_layers=1,
+ drop_p=0.1):
super(JointModel, self).__init__()
self.vocab_size = vocab_size
self.embedding_size = embedding_size
@@ -31,41 +37,39 @@ def __init__(self, vocab_size, embedding_size, hidden_size, num_intents, num_slo
self.embedding = nn.Embedding(vocab_size, embedding_size)
self.dropout = nn.Dropout(p=drop_p)
- self.layer_norm = nn.LayerNorm(2*hidden_size)
- self.bilstm = nn.LSTM(input_size=embedding_size, hidden_size=hidden_size, direction="bidirectional", num_layers=num_layers, dropout=drop_p)
- self.ner_classifier = nn.Linear(hidden_size*2, num_slots+2)
- self.intent_classifier = nn.Linear(hidden_size*2, num_intents)
-
- self.crf = crf.LinearChainCrf(num_slots, crf_lr=0.001, with_start_stop_tag=True)
+ self.layer_norm = nn.LayerNorm(2 * hidden_size)
+ self.bilstm = nn.LSTM(
+ input_size=embedding_size,
+ hidden_size=hidden_size,
+ direction="bidirectional",
+ num_layers=num_layers,
+ dropout=drop_p)
+ self.ner_classifier = nn.Linear(hidden_size * 2, num_slots + 2)
+ self.intent_classifier = nn.Linear(hidden_size * 2, num_intents)
+
+ self.crf = crf.LinearChainCrf(
+ num_slots, crf_lr=0.001, with_start_stop_tag=True)
self.crf_loss = crf.LinearChainCrfLoss(self.crf)
self.viterbi_decoder = crf.ViterbiDecoder(self.crf.transitions)
-
def forward(self, inputs, lens):
batch_size, seq_len = inputs.shape
inputs_embedding = self.embedding(inputs)
if self.drop_p:
- inputs_embedding = self.dropout(inputs_embedding)
+ inputs_embedding = self.dropout(inputs_embedding)
lstm_outputs, _ = self.bilstm(inputs_embedding)
lstm_outputs = self.layer_norm(lstm_outputs)
emissions = self.ner_classifier(lstm_outputs)
- indices = paddle.stack([paddle.arange(batch_size), lens-1], axis=1)
+ indices = paddle.stack([paddle.arange(batch_size), lens - 1], axis=1)
last_step_hiddens = paddle.gather_nd(lstm_outputs, indices)
intent_logits = self.intent_classifier(last_step_hiddens)
return emissions, intent_logits
-
def get_slot_loss(self, features, lens, tags):
slot_loss = self.crf_loss(features, lens, tags)
slot_loss = paddle.mean(slot_loss)
return slot_loss
-
def get_intent_loss(self, intent_logits, intent_labels):
return F.cross_entropy(intent_logits, intent_labels)
-
-
-
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/paddle_crf.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/paddle_crf.py
old mode 100644
new mode 100755
index 04602c389..3a6a59e8e
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/paddle_crf.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/paddle_crf.py
@@ -351,11 +351,11 @@ def forward(self, inputs, lengths):
lengths_np = lengths.numpy()
batch_path = []
max_len = 0
-
+
for batch_id in range(batch_size):
best_last_tag = last_ids[batch_id]
path = [best_last_tag]
- for hist in reversed(historys[:(lengths_np[batch_id]-1)]):
+ for hist in reversed(historys[:(lengths_np[batch_id] - 1)]):
best_last_tag = hist[batch_id][best_last_tag]
path.append(best_last_tag)
@@ -373,4 +373,3 @@ def _get_batch_index(self, batch_size):
0]:
self._batch_index = paddle.arange(end=batch_size, dtype="int64")
return self._batch_index
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/predict.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/predict.py
old mode 100644
new mode 100755
index 923550c23..120302b15
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/predict.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import yaml
import argparse
@@ -29,39 +28,42 @@ def load_dict(dict_path):
with open(dict_path, "r", encoding="utf-8") as f:
words = [word.strip() for word in f.readlines()]
dict2id = dict(zip(words, range(len(words))))
- id2dict = {v:k for k,v in dict2id.items()}
+ id2dict = {v: k for k, v in dict2id.items()}
return dict2id, id2dict
-
def predict(jointModel, token2id, id2slot, metric):
jointModel.eval()
-
+
while True:
input_text = input("input query: ")
if input_text == "quit":
break
splited_text = input_text.split()
- tokens = [token2id.get(token, token2id["[unk]"]) for token in splited_text]
+ tokens = [
+ token2id.get(token, token2id["[unk]"]) for token in splited_text
+ ]
tokens_len = len(tokens)
-
+
if tokens_len < 2:
- print(f"the squence [{input_text}] is too short, please input valid text sequence.")
+ print(
+ f"the squence [{input_text}] is too short, please input valid text sequence."
+ )
continue
-
+
# constructing data to input to model
tokens = paddle.to_tensor(tokens, dtype="int64").unsqueeze(0)
tokens_len = paddle.to_tensor([tokens_len], dtype="int64")
# computing emission score and intent score
emissions, intent_logits = jointModel(tokens, tokens_len)
-
+
# decoding with viterbi
_, pred_paths = jointModel.viterbi_decoder(emissions, tokens_len)
- entities = metric.get_entities_bio(pred_paths[0][:tokens_len[0]])
-
+ entities = metric.get_entities_bio(pred_paths[0][:tokens_len[0]])
+
# obtaining the intent
intent_id = paddle.argmax(intent_logits, axis=1).numpy()[0]
@@ -69,15 +71,17 @@ def predict(jointModel, token2id, id2slot, metric):
print("intent:", id2intent[intent_id])
for entity in entities:
entity_type, start, end = entity
- entity_text = " ".join(splited_text[start:end+1])
+ entity_text = " ".join(splited_text[start:end + 1])
print(f"{entity_text} : {entity_type}")
-
-if __name__=="__main__":
- parser.add_argument("--model_path", default="", help="the path of the saved model that you would like to verify")
+if __name__ == "__main__":
+ parser.add_argument(
+ "--model_path",
+ default="",
+ help="the path of the saved model that you would like to verify")
model_path = parser.parse_args().model_path
-
+
# configuring model training
with open("config.yaml", "r", encoding="utf-8") as f:
args = yaml.load(f.read())
@@ -94,8 +98,14 @@ def predict(jointModel, token2id, id2slot, metric):
# load model
loaded_state_dict = paddle.load(model_path)
- jointModel = JointModel(args["vocab_size"], args["embedding_size"], args["lstm_hidden_size"], args["num_intents"], args["num_slots"], num_layers=args["lstm_layers"], drop_p=args["dropout_rate"])
+ jointModel = JointModel(
+ args["vocab_size"],
+ args["embedding_size"],
+ args["lstm_hidden_size"],
+ args["num_intents"],
+ args["num_slots"],
+ num_layers=args["lstm_layers"],
+ drop_p=args["dropout_rate"])
jointModel.load_dict(loaded_state_dict)
-
predict(jointModel, token2id, id2slot, metric)
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/train.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/train.py
old mode 100644
new mode 100755
index 69bbe697d..a646b6b7b
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/train.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import yaml
import paddle
@@ -23,6 +22,7 @@
import paddle.nn.functional as F
from evaluate import evaluate
+
def collate_fn(batch, token_pad_val=0, tag_pad_val=0):
token_list, tag_list, intent_list, len_list = [], [], [], []
for tokens, tags, intent, len_ in batch:
@@ -34,71 +34,103 @@ def collate_fn(batch, token_pad_val=0, tag_pad_val=0):
# padding sequence
max_len = max(map(len, token_list))
for i in range(len(token_list)):
- token_list[i] = token_list[i] + [token_pad_val] * (max_len-len(token_list[i]))
- tag_list[i] = tag_list[i] + [tag_pad_val] * (max_len - len(tag_list[i]))
-
- return paddle.to_tensor(token_list), paddle.to_tensor(tag_list), paddle.to_tensor(intent_list), paddle.to_tensor(len_list)
+ token_list[i] = token_list[i] + [token_pad_val] * (max_len -
+ len(token_list[i]))
+ tag_list[i] = tag_list[i] + [tag_pad_val] * (max_len - len(tag_list[i])
+ )
+
+ return paddle.to_tensor(token_list), paddle.to_tensor(
+ tag_list), paddle.to_tensor(intent_list), paddle.to_tensor(len_list)
def train():
-
+
# configuring model training
with open("config.yaml", "r", encoding="utf-8") as f:
args = yaml.load(f.read())
- train_set = ATISDataset(args["train_path"], args["vocab_path"], args["intent_path"], args["slot_path"])
- test_set = ATISDataset(args["test_path"], args["vocab_path"], args["intent_path"], args["slot_path"])
+ train_set = ATISDataset(args["train_path"], args["vocab_path"],
+ args["intent_path"], args["slot_path"])
+ test_set = ATISDataset(args["test_path"], args["vocab_path"],
+ args["intent_path"], args["slot_path"])
- print("train:",len(train_set))
+ print("train:", len(train_set))
print("test:", len(test_set))
args["vocab_size"] = train_set.vocab_size
args["num_intents"] = train_set.num_intents
args["num_slots"] = train_set.num_slots
- train_loader = DataLoader(train_set, batch_size=args["batch_size"], shuffle=True, drop_last=True, collate_fn=collate_fn)
- test_loader = DataLoader(test_set, batch_size=args["batch_size"], shuffle=False, drop_last=False, collate_fn=collate_fn)
+ train_loader = DataLoader(
+ train_set,
+ batch_size=args["batch_size"],
+ shuffle=True,
+ drop_last=True,
+ collate_fn=collate_fn)
+ test_loader = DataLoader(
+ test_set,
+ batch_size=args["batch_size"],
+ shuffle=False,
+ drop_last=False,
+ collate_fn=collate_fn)
- jointModel = JointModel(args["vocab_size"], args["embedding_size"], args["lstm_hidden_size"], args["num_intents"], args["num_slots"], num_layers=args["lstm_layers"], drop_p=args["dropout_rate"])
+ jointModel = JointModel(
+ args["vocab_size"],
+ args["embedding_size"],
+ args["lstm_hidden_size"],
+ args["num_intents"],
+ args["num_slots"],
+ num_layers=args["lstm_layers"],
+ drop_p=args["dropout_rate"])
use_gpu = True if paddle.get_device().startswith("gpu") else False
if use_gpu:
paddle.set_device('gpu:0')
- optimizer = paddle.optimizer.Adam(learning_rate=args["learning_rate"], beta1=0.9, beta2=0.99, parameters=jointModel.parameters())
-
+ optimizer = paddle.optimizer.Adam(
+ learning_rate=args["learning_rate"],
+ beta1=0.9,
+ beta2=0.99,
+ parameters=jointModel.parameters())
+
jointModel.train()
# training and evaluating model
- for epoch in range(1, args["num_epochs"]+1):
+ for epoch in range(1, args["num_epochs"] + 1):
for step, batch in enumerate(train_loader()):
batch_tokens, batch_tags, batch_intents, batch_lens = batch
emissions, intent_logits = jointModel(batch_tokens, batch_lens)
# compute slot prediction loss
- slot_loss = jointModel.get_slot_loss(emissions, batch_lens, batch_tags)
+ slot_loss = jointModel.get_slot_loss(emissions, batch_lens,
+ batch_tags)
# compute intent prediction loss
- intent_loss = jointModel.get_intent_loss(intent_logits, batch_intents)
+ intent_loss = jointModel.get_intent_loss(intent_logits,
+ batch_intents)
# sum slot_loss and intent_loss
loss = slot_loss + intent_loss
-
+
loss.backward()
optimizer.step()
optimizer.clear_gradients()
-
- if step!=0 and step % args["log_steps"] == 0:
- print("Epoch: %d, step: %d, total loss: %.4f, intent_loss: %.4f, slot_loss:%.4f" % (epoch, step, loss, intent_loss, slot_loss))
- if step!=0 and step % args["eval_steps"] == 0:
+
+ if step != 0 and step % args["log_steps"] == 0:
+ print(
+ "Epoch: %d, step: %d, total loss: %.4f, intent_loss: %.4f, slot_loss:%.4f"
+ % (epoch, step, loss, intent_loss, slot_loss))
+ if step != 0 and step % args["eval_steps"] == 0:
evaluate(jointModel, test_set, args)
jointModel.train()
-
- if (args["save_epochs"] != -1 and epoch % args["save_epochs"] == 0) or epoch == args["num_epochs"]:
+
+ if (args["save_epochs"] != -1 and epoch % args["save_epochs"] == 0
+ ) or epoch == args["num_epochs"]:
if not os.path.exists(args["save_dir"]):
os.makedirs(args["save_dir"])
- save_model_path = os.path.join(args["save_dir"], "jointModel_e{}.pdparams".format(epoch))
- paddle.save(jointModel.state_dict(), save_model_path)
-
+ save_model_path = os.path.join(
+ args["save_dir"], "jointModel_e{}.pdparams".format(epoch))
+ paddle.save(jointModel.state_dict(), save_model_path)
+
# save training args
save_args_path = os.path.join(args["save_dir"], "args.pdparams")
paddle.save(args, save_args_path)
-if __name__=="__main__":
+if __name__ == "__main__":
train()
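# Illustrative aside: what collate_fn above does before building tensors --
# right-pad every token and tag sequence in the batch to the batch-local
# maximum length. A framework-free sketch with invented data, not repo code:
batch = [[5, 9, 2], [7, 1]]
token_pad_val = 0

max_len = max(map(len, batch))
padded = [seq + [token_pad_val] * (max_len - len(seq)) for seq in batch]
print(padded)  # [[5, 9, 2], [7, 1, 0]]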
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/README.md b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/README.md
old mode 100644
new mode 100755
index a9b4b8620..9b230da68
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/README.md
@@ -15,7 +15,7 @@
## 2. 数据说明
-CrossWOZ 是第一个大规模的中国跨域任务导向的数据集。 它包含5个领域,共计6K对话和102K语句,包括酒店、餐厅、景点、地铁和出租车。 此外,语料库包含丰富的用户和系统端对话状态和对话行为的注释。
+CrossWOZ 是第一个大规模的中国跨域任务导向的数据集。 它包含5个领域,共计6K对话和102K语句,包括酒店、餐厅、景点、地铁和出租车。 此外,语料库包含丰富的用户和系统端对话状态和对话行为的注释。
关于数据集的更多信息请参考论文[ CrossWOZ: A Large-Scale Chinese Cross-Domain Task-Oriented Dialogue Dataset ](https://arxiv.org/abs/2002.11893) 。
@@ -31,7 +31,7 @@ sh run_train.sh
可按如下方式,使用测试集进行模型测试。
```shell
-sh run_evaluate.sh
+sh run_evaluate.sh
```
### 3.3 模型推理
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/data.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/data.py
old mode 100644
new mode 100755
index 779194720..c24436807
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/data.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/data.py
@@ -12,42 +12,61 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import json
+
def load_dict(path):
with open(path, "r", encoding="utf-8") as f:
label_list = json.load(f)
-
+
label2id = dict([(label, idx) for idx, label in enumerate(label_list)])
id2label = dict([(idx, label) for label, idx in label2id.items()])
return label2id, id2label
+
def read(data_path):
with open(data_path, "r", encoding="utf-8") as f:
examples = json.load(f)
for example in examples:
- yield {"words":example[0], "slot_labels": example[1], "intent_labels":example[2], "history": example[4]}
+ yield {
+ "words": example[0],
+ "slot_labels": example[1],
+ "intent_labels": example[2],
+ "history": example[4]
+ }
+
-def convert_example_to_feature(example, tokenizer, slot2id, intent2id, use_history=False, pad_default_tag=0, max_seq_len=512):
- features = tokenizer(example["words"], is_split_into_words=True, max_seq_len=max_seq_len)
+def convert_example_to_feature(example,
+ tokenizer,
+ slot2id,
+ intent2id,
+ use_history=False,
+ pad_default_tag=0,
+ max_seq_len=512):
+ features = tokenizer(
+ example["words"], is_split_into_words=True, max_seq_len=max_seq_len)
# truncate the slot sequence so its length matches the word sequence
- slot_ids = [slot2id[slot] for slot in example["slot_labels"][:(max_seq_len-2)]]
- slot_ids = [slot2id[pad_default_tag]] + slot_ids + [slot2id[pad_default_tag]]
+ slot_ids = [
+ slot2id[slot] for slot in example["slot_labels"][:(max_seq_len - 2)]
+ ]
+ slot_ids = [slot2id[pad_default_tag]
+ ] + slot_ids + [slot2id[pad_default_tag]]
assert len(features["input_ids"]) == len(slot_ids)
# get intent feature
intent_labels = [0] * len(intent2id)
for intent in example["intent_labels"]:
- intent_labels[intent2id[intent]] = 1
+ intent_labels[intent2id[intent]] = 1
# get history feature
if use_history:
- history_features = tokenizer("".join(example["history"]), max_seq_len=max_seq_len)
+ history_features = tokenizer(
+ "".join(example["history"]), max_seq_len=max_seq_len)
else:
- history_features = {"input_ids":[], "token_type_ids":[]}
-
- return features["input_ids"], features["token_type_ids"], intent_labels, slot_ids, history_features["input_ids"]
+ history_features = {"input_ids": [], "token_type_ids": []}
+ return features["input_ids"], features[
+ "token_type_ids"], intent_labels, slot_ids, history_features[
+ "input_ids"]
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/evaluate.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/evaluate.py
old mode 100644
new mode 100755
index f91130f0b..85d2732dd
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/evaluate.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/evaluate.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import argparse
import paddle
from functools import partial
@@ -26,7 +25,6 @@
from metric import SeqEntityScore, MultiLabelClassificationScore
-
def evaluate(joint_model, data_loader, intent_metric, slot_metric):
joint_model.eval()
@@ -34,9 +32,11 @@ def evaluate(joint_model, data_loader, intent_metric, slot_metric):
slot_metric.reset()
for idx, batch_data in enumerate(data_loader):
input_ids, token_type_ids, intent_labels, tag_ids, history_ids = batch_data
- intent_logits, slot_logits = joint_model(input_ids, token_type_ids=token_type_ids, history_ids=history_ids)
+ intent_logits, slot_logits = joint_model(
+ input_ids, token_type_ids=token_type_ids, history_ids=history_ids)
# count intent metric
- intent_metric.update(pred_labels=intent_logits, real_labels=intent_labels)
+ intent_metric.update(
+ pred_labels=intent_logits, real_labels=intent_labels)
# count slot metric
slot_pred_labels = slot_logits.argmax(axis=-1)
slot_metric.update(pred_paths=slot_pred_labels, real_paths=tag_ids)
@@ -47,14 +47,14 @@ def evaluate(joint_model, data_loader, intent_metric, slot_metric):
return intent_results, slot_results
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default=None, help="model path that you saved")
parser.add_argument("--test_path", type=str, default=None, help="test data")
parser.add_argument("--slot_dict_path", type=str, default=None, help="slot dict path")
parser.add_argument("--intent_dict_path", type=str, default=None, help="intent dict path")
- parser.add_argument("--use_history", type=bool, default=False, help="use history or not")
+ parser.add_argument("--use_history", type=bool, default=False, help="use history or not")
parser.add_argument("--batch_size", type=int, default=32, help="Total examples' number in batch for training.")
parser.add_argument("--max_seq_len", type=int, default=512, help="Number of words of the longest seqence.")
args = parser.parse_args()
@@ -65,7 +65,7 @@ def evaluate(joint_model, data_loader, intent_metric, slot_metric):
intent2id, id2intent = load_dict(args.intent_dict_path)
slot2id, id2slot = load_dict(args.slot_dict_path)
test_ds = load_dataset(read, data_path=args.test_path, lazy=False)
-
+
tokenizer = ErnieTokenizer.from_pretrained(model_name)
trans_func = partial(convert_example_to_feature, tokenizer=tokenizer, slot2id=slot2id, intent2id=intent2id, use_history=args.use_history, pad_default_tag="O", max_seq_len=args.max_seq_len)
test_ds = test_ds.map(trans_func, lazy=False)
@@ -84,18 +84,15 @@ def evaluate(joint_model, data_loader, intent_metric, slot_metric):
# load model
loaded_state_dict = paddle.load(args.model_path)
ernie = ErnieModel.from_pretrained(model_name)
- joint_model = JointModel(ernie, len(slot2id), len(intent2id), use_history=args.use_history, dropout=0.1)
+ joint_model = JointModel(ernie, len(slot2id), len(intent2id), use_history=args.use_history, dropout=0.1)
joint_model.load_dict(loaded_state_dict)
print(args.use_history)
intent_metric = MultiLabelClassificationScore(id2intent)
slot_metric = SeqEntityScore(id2slot)
-
+
# evaluate on dev data
intent_results, slot_results = evaluate(joint_model, test_loader, intent_metric, slot_metric)
intent_result, slot_result = intent_results["Total"], slot_results["Total"]
print(f'intent evaluation result: precision: {intent_result["Precision"]:.5f}, recall: {intent_result["Recall"]:.5f}, F1: {intent_result["F1"]:.5f}')
print(f'slot evaluation result: precision: {slot_result["Precision"]:.5f}, recall: {slot_result["Recall"]:.5f}, F1: {slot_result["F1"]:.5f}')
-
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/metric.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/metric.py
old mode 100644
new mode 100755
index 0e5209fc8..5aedc1740
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/metric.py
@@ -12,18 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
from collections import Counter
import numpy as np
import paddle
+
class SeqEntityScore(object):
def __init__(self, id2tag):
self.id2tag = id2tag
self.real_entities = []
self.pred_entities = []
self.correct_entities = []
-
+
def reset(self):
self.real_entities.clear()
self.pred_entities.clear()
@@ -32,7 +32,8 @@ def reset(self):
def compute(self, real_count, pred_count, correct_count):
recall = 0 if real_count == 0 else (correct_count / real_count)
precision = 0 if pred_count == 0 else (correct_count / pred_count)
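+        # F1 is the harmonic mean of precision and recall; the zero check avoids division by zero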
- f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall)
+ f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (
+ precision + recall)
return recall, precision, f1
def get_result(self):
@@ -44,16 +45,26 @@ def get_result(self):
real_count = count
pred_count = pred_counter.get(label, 0)
correct_count = correct_counter.get(label, 0)
- recall, precision, f1 = self.compute(real_count, pred_count, correct_count)
- result[label] = {"Precision": round(precision, 4), 'Recall': round(recall, 4), 'F1': round(f1, 4)}
+ recall, precision, f1 = self.compute(real_count, pred_count,
+ correct_count)
+ result[label] = {
+ "Precision": round(precision, 4),
+ 'Recall': round(recall, 4),
+ 'F1': round(f1, 4)
+ }
real_total_count = len(self.real_entities)
pred_total_count = len(self.pred_entities)
correct_total_count = len(self.correct_entities)
- recall, precision, f1 = self.compute(real_total_count, pred_total_count, correct_total_count)
- result["Total"] = {"Precision": round(precision, 4), 'Recall': round(recall, 4), 'F1': round(f1, 4)}
+ recall, precision, f1 = self.compute(
+ real_total_count, pred_total_count, correct_total_count)
+ result["Total"] = {
+ "Precision": round(precision, 4),
+ 'Recall': round(recall, 4),
+ 'F1': round(f1, 4)
+ }
return result
-
+
def get_entities_bios(self, seq):
entities = []
entity = [-1, -1, -1]
@@ -96,7 +107,7 @@ def get_entities_bio(self, seq):
for indx, tag in enumerate(seq):
if not isinstance(tag, str):
tag = self.id2tag[tag]
-
+
if tag.startswith("B+"):
if entity[2] != -1:
entities.append(entity)
@@ -119,7 +130,7 @@ def get_entities_bio(self, seq):
return entities
def update(self, real_paths, pred_paths):
-
+
if isinstance(real_paths, paddle.Tensor):
real_paths = real_paths.numpy()
if isinstance(pred_paths, paddle.Tensor):
@@ -130,14 +141,19 @@ def update(self, real_paths, pred_paths):
pred_ents = self.get_entities_bio(pred_path)
self.real_entities.extend(real_ents)
self.pred_entities.extend(pred_ents)
- self.correct_entities.extend([pred_ent for pred_ent in pred_ents if pred_ent in real_ents])
+ self.correct_entities.extend(
+ [pred_ent for pred_ent in pred_ents if pred_ent in real_ents])
def format_print(self, result, print_detail=False):
def print_item(entity, metric):
if entity != "Total":
- print(f"Entity: {entity} - Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}")
+ print(
+ f"Entity: {entity} - Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}"
+ )
else:
- print(f"Total: Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}")
+ print(
+ f"Total: Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}"
+ )
print_item("Total", result["Total"])
if print_detail:
@@ -153,13 +169,13 @@ def __init__(self, id2label):
self.id2label = id2label
self.all_pred_labels = []
self.all_real_labels = []
- self.all_correct_labels = []
-
+ self.all_correct_labels = []
+
def reset(self):
self.all_pred_labels.clear()
self.all_real_labels.clear()
self.all_correct_labels.clear()
-
+
def update(self, pred_labels, real_labels):
if not isinstance(pred_labels, list):
pred_labels = pred_labels.numpy().tolist()
@@ -168,17 +184,18 @@ def update(self, pred_labels, real_labels):
for i in range(len(real_labels)):
for j in range(len(real_labels[0])):
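+            # a logit > 0 corresponds to sigmoid probability > 0.5, the multi-label decision threshold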
- if real_labels[i][j] == 1 and pred_labels[i][j] > 0:
+ if real_labels[i][j] == 1 and pred_labels[i][j] > 0:
self.all_correct_labels.append(self.id2label[j])
if real_labels[i][j] == 1:
self.all_real_labels.append(self.id2label[j])
if pred_labels[i][j] > 0:
self.all_pred_labels.append(self.id2label[j])
- def compute(self, pred_count , real_count, correct_count):
- recall = 0. if real_count == 0 else (correct_count / real_count)
+ def compute(self, pred_count, real_count, correct_count):
+ recall = 0. if real_count == 0 else (correct_count / real_count)
precision = 0. if pred_count == 0 else (correct_count / pred_count)
- f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall)
+ f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (
+ precision + recall)
return precision, recall, f1
def get_result(self):
@@ -190,22 +207,36 @@ def get_result(self):
real_count = count
pred_count = pred_counter[label]
correct_count = correct_counter[label]
- precision, recall, f1 = self.compute(pred_count, real_count, correct_count)
- result[label] = {"Precision": round(precision, 4), 'Recall': round(recall, 4), 'F1': round(f1, 4)}
+ precision, recall, f1 = self.compute(pred_count, real_count,
+ correct_count)
+ result[label] = {
+ "Precision": round(precision, 4),
+ 'Recall': round(recall, 4),
+ 'F1': round(f1, 4)
+ }
real_total_count = len(self.all_real_labels)
pred_total_count = len(self.all_pred_labels)
correct_total_count = len(self.all_correct_labels)
- recall, precision, f1 = self.compute(real_total_count, pred_total_count, correct_total_count)
- result["Total"] = {"Precision": round(precision, 4), 'Recall': round(recall, 4), 'F1': round(f1, 4)}
+ recall, precision, f1 = self.compute(
+ real_total_count, pred_total_count, correct_total_count)
+ result["Total"] = {
+ "Precision": round(precision, 4),
+ 'Recall': round(recall, 4),
+ 'F1': round(f1, 4)
+ }
- return result
+ return result
def format_print(self, result, print_detail=False):
def print_item(entity, metric):
if entity != "Total":
- print(f"Entity: {entity} - Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}")
+ print(
+ f"Entity: {entity} - Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}"
+ )
else:
- print(f"Total: Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}")
+ print(
+ f"Total: Precision: {metric['Precision']} - Recall: {metric['Recall']} - F1: {metric['F1']}"
+ )
print_item("Total", result["Total"])
if print_detail:
@@ -213,6 +244,4 @@ def print_item(entity, metric):
if key == "Total":
continue
print_item(key, result[key])
- print("\n")
-
-
+ print("\n")
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/model.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/model.py
old mode 100644
new mode 100755
index bc1684195..29ac1d386
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/model.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/model.py
@@ -12,43 +12,71 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddlenlp.transformers import ErniePretrainedModel
+
class JointModel(paddle.nn.Layer):
- def __init__(self, ernie, num_slots, num_intents, use_history=False, dropout=None):
+ def __init__(self,
+ ernie,
+ num_slots,
+ num_intents,
+ use_history=False,
+ dropout=None):
super(JointModel, self).__init__()
self.num_slots = num_slots
self.num_intents = num_intents
self.use_history = use_history
self.ernie = ernie
- self.dropout = nn.Dropout(dropout if dropout is not None else self.ernie.config["hidden_dropout_prob"])
+ self.dropout = nn.Dropout(dropout if dropout is not None else
+ self.ernie.config["hidden_dropout_prob"])
if self.use_history:
- self.intent_hidden = nn.Linear(2 * self.ernie.config["hidden_size"], self.ernie.config["hidden_size"])
- self.slot_hidden = nn.Linear(2 * self.ernie.config["hidden_size"], self.ernie.config["hidden_size"])
+ self.intent_hidden = nn.Linear(2 * self.ernie.config["hidden_size"],
+ self.ernie.config["hidden_size"])
+ self.slot_hidden = nn.Linear(2 * self.ernie.config["hidden_size"],
+ self.ernie.config["hidden_size"])
else:
- self.intent_hidden = nn.Linear(self.ernie.config["hidden_size"], self.ernie.config["hidden_size"])
- self.slot_hidden = nn.Linear(self.ernie.config["hidden_size"], self.ernie.config["hidden_size"])
+ self.intent_hidden = nn.Linear(self.ernie.config["hidden_size"],
+ self.ernie.config["hidden_size"])
+ self.slot_hidden = nn.Linear(self.ernie.config["hidden_size"],
+ self.ernie.config["hidden_size"])
- self.intent_classifier = nn.Linear(self.ernie.config["hidden_size"], self.num_intents)
- self.slot_classifier = nn.Linear(self.ernie.config["hidden_size"], self.num_slots)
+ self.intent_classifier = nn.Linear(self.ernie.config["hidden_size"],
+ self.num_intents)
+ self.slot_classifier = nn.Linear(self.ernie.config["hidden_size"],
+ self.num_slots)
-
- def forward(self, token_ids, token_type_ids=None, position_ids=None, attention_mask=None, history_ids=None):
- sequence_output, pooled_output = self.ernie(token_ids, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask)
+ def forward(self,
+ token_ids,
+ token_type_ids=None,
+ position_ids=None,
+ attention_mask=None,
+ history_ids=None):
+ sequence_output, pooled_output = self.ernie(
+ token_ids,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ attention_mask=attention_mask)
if self.use_history and (history_ids is not None):
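+            # self.ernie returns (sequence_output, pooled_output); [1] is the pooled [CLS] vector of the history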
history_pooled_output = self.ernie(history_ids)[1]
# concat sequence_output and history output
- sequence_output = paddle.concat([history_pooled_output.unsqueeze(1).tile(repeat_times=[1, sequence_output.shape[1], 1]), sequence_output], axis=-1)
- pooled_output = paddle.concat([history_pooled_output, pooled_output], axis=-1)
+ sequence_output = paddle.concat(
+ [
+ history_pooled_output.unsqueeze(1).tile(
+ repeat_times=[1, sequence_output.shape[1], 1]),
+ sequence_output
+ ],
+ axis=-1)
+ pooled_output = paddle.concat(
+ [history_pooled_output, pooled_output], axis=-1)
- sequence_output = F.relu(self.slot_hidden(self.dropout(sequence_output)))
+ sequence_output = F.relu(
+ self.slot_hidden(self.dropout(sequence_output)))
pooled_output = F.relu(self.intent_hidden(self.dropout(pooled_output)))
intent_logits = self.intent_classifier(pooled_output)
@@ -56,4 +84,3 @@ def forward(self, token_ids, token_type_ids=None, position_ids=None, attention_m
slot_logits = self.slot_classifier(sequence_output)
return intent_logits, slot_logits
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/predict.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/predict.py
old mode 100644
new mode 100755
index 61c410064..0dfdd2255
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/predict.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import argparse
import paddle
from functools import partial
@@ -35,18 +34,28 @@ def predict(joint_model, tokenizer, id2intent, id2slot):
if input_text == "quit":
break
splited_input_text = list(input_text)
- features = tokenizer(splited_input_text, is_split_into_words=True, max_seq_len=args.max_seq_len, return_length=True)
+ features = tokenizer(
+ splited_input_text,
+ is_split_into_words=True,
+ max_seq_len=args.max_seq_len,
+ return_length=True)
input_ids = paddle.to_tensor(features["input_ids"]).unsqueeze(0)
- token_type_ids = paddle.to_tensor(features["token_type_ids"]).unsqueeze(0)
+ token_type_ids = paddle.to_tensor(features[
+ "token_type_ids"]).unsqueeze(0)
seq_len = features["seq_len"]
history_ids = paddle.to_tensor(tokenizer("")["input_ids"]).unsqueeze(0)
- intent_logits, slot_logits = joint_model(input_ids, token_type_ids=token_type_ids, history_ids=history_ids)
+ intent_logits, slot_logits = joint_model(
+ input_ids, token_type_ids=token_type_ids, history_ids=history_ids)
# parse intent labels
- intent_labels = [id2intent[idx] for idx, v in enumerate(intent_logits.numpy()[0]) if v > 0]
-
+ intent_labels = [
+ id2intent[idx] for idx, v in enumerate(intent_logits.numpy()[0])
+ if v > 0
+ ]
+
# parse slot labels
- slot_pred_labels = slot_logits.argmax(axis=-1).numpy()[0][1:(seq_len)-1]
+ slot_pred_labels = slot_logits.argmax(
+ axis=-1).numpy()[0][1:(seq_len) - 1]
slot_labels = []
for idx in slot_pred_labels:
slot_label = id2slot[idx]
@@ -56,7 +65,7 @@ def predict(joint_model, tokenizer, id2intent, id2slot):
slot_label = "".join(slot_label)
slot_labels.append(slot_label)
slot_entities = get_entities(slot_labels)
-
+
# print result
if intent_labels:
print("intents: ", ",".join(intent_labels))
@@ -64,17 +73,17 @@ def predict(joint_model, tokenizer, id2intent, id2slot):
print("intents: ", "无")
for slot_entity in slot_entities:
entity_name, start, end = slot_entity
- print(f"{entity_name}: ", "".join(splited_input_text[start:end+1]))
-
+ print(f"{entity_name}: ",
+ "".join(splited_input_text[start:end + 1]))
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default=None, help="model path that you saved")
parser.add_argument("--slot_dict_path", type=str, default=None, help="slot dict path")
parser.add_argument("--intent_dict_path", type=str, default=None, help="intent dict path")
- parser.add_argument("--use_history", type=bool, default=False, help="use history or not")
+ parser.add_argument("--use_history", type=bool, default=False, help="use history or not")
parser.add_argument("--max_seq_len", type=int, default=512, help="Number of words of the longest seqence.")
args = parser.parse_args()
    # yapf: enable
@@ -83,17 +92,15 @@ def predict(joint_model, tokenizer, id2intent, id2slot):
model_name = "ernie-1.0"
intent2id, id2intent = load_dict(args.intent_dict_path)
slot2id, id2slot = load_dict(args.slot_dict_path)
-
+
tokenizer = ErnieTokenizer.from_pretrained(model_name)
# load model
loaded_state_dict = paddle.load(args.model_path)
ernie = ErnieModel.from_pretrained(model_name)
- joint_model = JointModel(ernie, len(slot2id), len(intent2id), use_history=args.use_history, dropout=0.1)
+ joint_model = JointModel(ernie, len(slot2id), len(intent2id), use_history=args.use_history, dropout=0.1)
joint_model.load_dict(loaded_state_dict)
-
- # evalute on dev data
- predict(joint_model, tokenizer, id2intent, id2slot)
-
+    # run interactive prediction on user input
+ predict(joint_model, tokenizer, id2intent, id2slot)
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/run_evaluate.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/run_evaluate.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/run_predict.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/run_predict.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/run_train.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/run_train.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/train.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/train.py
old mode 100644
new mode 100755
index 7d221f729..d08869474
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/train.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import os
import ast
@@ -61,7 +60,8 @@
class JointLoss(paddle.nn.Layer):
def __init__(self, intent_weight=None):
super(JointLoss, self).__init__()
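+        # BCE-with-logits for multi-label intent detection, cross entropy for single-label slot tagging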
- self.intent_criterion = paddle.nn.BCEWithLogitsLoss(weight=intent_weight)
+ self.intent_criterion = paddle.nn.BCEWithLogitsLoss(
+ weight=intent_weight)
self.slot_criterion = paddle.nn.CrossEntropyLoss()
def forward(self, intent_logits, slot_logits, intent_labels, slot_labels):
@@ -96,39 +96,70 @@ def train():
for intent in example["intent_labels"]:
intent_weight[intent2id[intent]] += 1
for intent, intent_id in intent2id.items():
- neg_pos = (len(train_ds) - intent_weight[intent_id]) / intent_weight[intent_id]
+ neg_pos = (len(train_ds) - intent_weight[intent_id]
+ ) / intent_weight[intent_id]
intent_weight[intent_id] = np.log10(neg_pos)
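+        # log10 of the negative/positive ratio gives rare intents larger, but bounded, loss weights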
intent_weight = paddle.to_tensor(intent_weight)
tokenizer = ErnieTokenizer.from_pretrained(model_name)
- trans_func = partial(convert_example_to_feature, tokenizer=tokenizer, slot2id=slot2id, intent2id=intent2id, use_history=args.use_history, pad_default_tag="O", max_seq_len=args.max_seq_len)
+ trans_func = partial(
+ convert_example_to_feature,
+ tokenizer=tokenizer,
+ slot2id=slot2id,
+ intent2id=intent2id,
+ use_history=args.use_history,
+ pad_default_tag="O",
+ max_seq_len=args.max_seq_len)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
batchify_fn = lambda samples, fn=Tuple(
- Pad(axis=0, pad_val=tokenizer.pad_token_id),
+ Pad(axis=0, pad_val=tokenizer.pad_token_id),
Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
Stack(dtype="float32"),
Pad(axis=0, pad_val=slot2id["O"], dtype="int64"),
Pad(axis=0, pad_val=tokenizer.pad_token_id)
):fn(samples)
- train_batch_sampler = paddle.io.BatchSampler(train_ds, batch_size=args.batch_size, shuffle=True)
- dev_batch_sampler = paddle.io.BatchSampler(dev_ds, batch_size=args.batch_size, shuffle=False)
- train_loader = paddle.io.DataLoader(dataset=train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn, return_list=True)
- dev_loader = paddle.io.DataLoader(dataset=dev_ds, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn, return_list=True)
-
+ train_batch_sampler = paddle.io.BatchSampler(
+ train_ds, batch_size=args.batch_size, shuffle=True)
+ dev_batch_sampler = paddle.io.BatchSampler(
+ dev_ds, batch_size=args.batch_size, shuffle=False)
+ train_loader = paddle.io.DataLoader(
+ dataset=train_ds,
+ batch_sampler=train_batch_sampler,
+ collate_fn=batchify_fn,
+ return_list=True)
+ dev_loader = paddle.io.DataLoader(
+ dataset=dev_ds,
+ batch_sampler=dev_batch_sampler,
+ collate_fn=batchify_fn,
+ return_list=True)
# configure model training
ernie = ErnieModel.from_pretrained(model_name)
- joint_model = JointModel(ernie, len(slot2id), len(intent2id), use_history=args.use_history, dropout=0.1)
+ joint_model = JointModel(
+ ernie,
+ len(slot2id),
+ len(intent2id),
+ use_history=args.use_history,
+ dropout=0.1)
num_training_steps = len(train_loader) * args.num_epoch
- lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps, args.warmup_proportion)
- decay_params = [p.name for n, p in joint_model.named_parameters() if not any(nd in n for nd in ["bias", "norm"])]
+ lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
+ args.warmup_proportion)
+ decay_params = [
+ p.name for n, p in joint_model.named_parameters()
+ if not any(nd in n for nd in ["bias", "norm"])
+ ]
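+    # bias and norm parameters above are excluded from weight decay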
grad_clip = paddle.nn.ClipGradByGlobalNorm(args.max_grad_norm)
- optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler, parameters=joint_model.parameters(), weight_decay=args.weight_decay, apply_decay_param_fun=lambda x: x in decay_params, grad_clip=grad_clip)
+ optimizer = paddle.optimizer.AdamW(
+ learning_rate=lr_scheduler,
+ parameters=joint_model.parameters(),
+ weight_decay=args.weight_decay,
+ apply_decay_param_fun=lambda x: x in decay_params,
+ grad_clip=grad_clip)
if args.intent_weight:
joint_loss = JointLoss(intent_weight)
@@ -139,13 +170,17 @@ def train():
slot_metric = SeqEntityScore(id2slot)
# start to train joint_model
global_step, intent_best_f1, slot_best_f1 = 0, 0., 0.
- joint_model.train()
- for epoch in range(1, args.num_epoch+1):
+ joint_model.train()
+ for epoch in range(1, args.num_epoch + 1):
for batch_data in train_loader:
input_ids, token_type_ids, intent_labels, tag_ids, history_ids = batch_data
- intent_logits, slot_logits = joint_model(input_ids, token_type_ids=token_type_ids, history_ids=history_ids)
+ intent_logits, slot_logits = joint_model(
+ input_ids,
+ token_type_ids=token_type_ids,
+ history_ids=history_ids)
- loss = joint_loss(intent_logits, slot_logits, intent_labels, tag_ids)
+ loss = joint_loss(intent_logits, slot_logits, intent_labels,
+ tag_ids)
loss.backward()
lr_scheduler.step()
@@ -153,26 +188,38 @@ def train():
optimizer.clear_grad()
if global_step > 0 and global_step % args.log_step == 0:
- print(f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}")
+ print(
+ f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}"
+ )
if global_step > 0 and global_step % args.eval_step == 0:
- intent_results, slot_results = evaluate(joint_model, dev_loader, intent_metric, slot_metric)
- intent_result, slot_result = intent_results["Total"], slot_results["Total"]
+ intent_results, slot_results = evaluate(
+ joint_model, dev_loader, intent_metric, slot_metric)
+ intent_result, slot_result = intent_results[
+ "Total"], slot_results["Total"]
joint_model.train()
intent_f1, slot_f1 = intent_result["F1"], slot_result["F1"]
if intent_f1 > intent_best_f1 or slot_f1 > slot_best_f1:
- paddle.save(joint_model.state_dict(), f"{args.checkpoint}/best.pdparams")
+ paddle.save(joint_model.state_dict(),
+ f"{args.checkpoint}/best.pdparams")
if intent_f1 > intent_best_f1:
- print(f"intent best F1 performence has been updated: {intent_best_f1:.5f} --> {intent_f1:.5f}")
+ print(
+ f"intent best F1 performence has been updated: {intent_best_f1:.5f} --> {intent_f1:.5f}"
+ )
intent_best_f1 = intent_f1
if slot_f1 > slot_best_f1:
- print(f"slot best F1 performence has been updated: {slot_best_f1:.5f} --> {slot_f1:.5f}")
+ print(
+ f"slot best F1 performence has been updated: {slot_best_f1:.5f} --> {slot_f1:.5f}"
+ )
slot_best_f1 = slot_f1
- print(f'intent evalution result: precision: {intent_result["Precision"]:.5f}, recall: {intent_result["Recall"]:.5f}, F1: {intent_result["F1"]:.5f}, current best {intent_best_f1:.5f}')
- print(f'slot evalution result: precision: {slot_result["Precision"]:.5f}, recall: {slot_result["Recall"]:.5f}, F1: {slot_result["F1"]:.5f}, current best {slot_best_f1:.5f}\n')
+ print(
+                        f'intent evaluation result: precision: {intent_result["Precision"]:.5f}, recall: {intent_result["Recall"]:.5f}, F1: {intent_result["F1"]:.5f}, current best {intent_best_f1:.5f}'
+ )
+ print(
+                        f'slot evaluation result: precision: {slot_result["Precision"]:.5f}, recall: {slot_result["Recall"]:.5f}, F1: {slot_result["F1"]:.5f}, current best {slot_best_f1:.5f}\n'
+ )
global_step += 1
-
-if __name__=="__main__":
+if __name__ == "__main__":
train()
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/utils.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/utils.py
old mode 100644
new mode 100755
index 577873daa..d9f6a110a
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/ernie/utils.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import random
import numpy as np
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/README.md b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/README.md
old mode 100644
new mode 100755
index 2044a2c7e..0b678469a
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/README.md
@@ -90,7 +90,7 @@ sh run_train.sh
Use the following command to evaluate the model.
```shell
-sh run_evaluate.sh
+sh run_evaluate.sh
```
### 3.3 Model Inference
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/data.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/data.py
old mode 100644
new mode 100755
index 4ae59b80a..765ee3e48
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/data.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/data.py
@@ -23,16 +23,22 @@ def generate_dict(ori_file_path, save_label_path, save_schema_path):
with open(ori_file_path, "r", encoding="utf-8") as f:
for relation_schema in f.readlines():
relation_schema = json.loads(relation_schema)
- predicate, subject_type, object_types = relation_schema["predicate"], relation_schema["subject_type"], relation_schema["object_type"]
- schemas[predicate] = {"object_type": object_types, "subject_type": subject_type}
-
- predicates.append("S-"+predicate+"-"+subject_type)
+ predicate, subject_type, object_types = relation_schema[
+ "predicate"], relation_schema["subject_type"], relation_schema[
+ "object_type"]
+ schemas[predicate] = {
+ "object_type": object_types,
+ "subject_type": subject_type
+ }
+
+ predicates.append("S-" + predicate + "-" + subject_type)
for object_type in object_types:
- predicates.append("O-"+predicate+"-"+object_types[object_type])
+ predicates.append("O-" + predicate + "-" + object_types[
+ object_type])
with open(save_label_path, "w", encoding="utf-8") as f:
for predicate in predicates:
- f.write(predicate+"\n")
+ f.write(predicate + "\n")
print(f"predicate dict has saved: {save_label_path}")
with open(save_schema_path, "w", encoding="utf-8") as f:
@@ -40,7 +46,6 @@ def generate_dict(ori_file_path, save_label_path, save_schema_path):
print(f"schema file has saved: {save_schema_path}")
-
def get_object_keyname(reverse_schema, predicate, object_valname):
object_type_keyname = reverse_schema[predicate][object_valname]
return object_type_keyname
@@ -49,26 +54,28 @@ def get_object_keyname(reverse_schema, predicate, object_valname):
def load_schema(schema_path):
with open(schema_path, "r", encoding="utf-8") as f:
schema = json.load(f)
-
+
return schema
+
def load_reverse_schema(schema_path):
schemas = load_schema(schema_path)
reverse_schemas = copy.deepcopy(schemas)
for reverse_schema in reverse_schemas:
object_type = reverse_schemas[reverse_schema]["object_type"]
- reverse_schemas[reverse_schema]["object_type"] = dict([(v,k) for k,v in object_type.items()])
-
+ reverse_schemas[reverse_schema]["object_type"] = dict(
+ [(v, k) for k, v in object_type.items()])
+
return reverse_schemas
-
+
def load_dict(path):
with open(path, "r", encoding="utf-8") as f:
words = [word.strip() for word in f.readlines()]
word2id = dict(zip(words, range(len(words))))
id2word = dict(zip(range(len(words)), words))
-
+
return word2id, id2word
@@ -77,15 +84,15 @@ def read(data_path):
for example in f.readlines():
example = json.loads(example)
yield example
-
-
+
+
def find_entity(input_ids, entity_ids):
entity_len = len(entity_ids)
match_start, match_end = -1, -1
for idx in range(len(input_ids)):
- if input_ids[idx:idx+entity_len] == entity_ids:
+ if input_ids[idx:idx + entity_len] == entity_ids:
match_start = idx
- match_end = idx+entity_len-1
+ match_end = idx + entity_len - 1
break
return match_start, match_end
@@ -95,78 +102,108 @@ def find_entity_with_visited(input_ids, entity_ids, visited):
entity_len = len(entity_ids)
match_start, match_end = -1, -1
for idx in range(len(input_ids)):
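+        # skip start positions whose span overlaps an already-matched entity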
- if sum(visited[idx:idx+entity_len])!=0:
+ if sum(visited[idx:idx + entity_len]) != 0:
continue
- if input_ids[idx:idx+entity_len] == entity_ids:
+ if input_ids[idx:idx + entity_len] == entity_ids:
match_start = idx
- match_end = idx+entity_len-1
+ match_end = idx + entity_len - 1
break
return match_start, match_end
-def convert_example_to_feature1(example, label2id, tokenizer, pad_default_label="O", max_seq_len=512):
+def convert_example_to_feature1(example,
+ label2id,
+ tokenizer,
+ pad_default_label="O",
+ max_seq_len=512):
# convert word sequence to feature
- features = tokenizer(list(example["text"]), is_split_into_words=True, max_seq_len=max_seq_len, return_length=True, return_attention_mask=True)
- input_ids, token_type_ids, attention_mask, seq_len = features["input_ids"], features["token_type_ids"], features["attention_mask"], features["seq_len"]
+ features = tokenizer(
+ list(example["text"]),
+ is_split_into_words=True,
+ max_seq_len=max_seq_len,
+ return_length=True,
+ return_attention_mask=True)
+ input_ids, token_type_ids, attention_mask, seq_len = features[
+ "input_ids"], features["token_type_ids"], features[
+ "attention_mask"], features["seq_len"]
# construct labels
labels = [[0] * len(label2id) for _ in range(seq_len)]
-
- spo_list = example["spo_list"] if "spo_list" in example.keys() else []
+
+ spo_list = example["spo_list"] if "spo_list" in example.keys() else []
for spo in spo_list:
- subject_label = "S-"+spo["predicate"]+"-"+spo["subject_type"]
+ subject_label = "S-" + spo["predicate"] + "-" + spo["subject_type"]
subject_ids = tokenizer.convert_tokens_to_ids(list(spo["subject"]))
entities = [(subject_label, subject_ids)]
for object_type in spo["object_type"]:
- object_label = "O-"+spo["predicate"]+"-"+spo["object_type"][object_type]
- object_ids = tokenizer.convert_tokens_to_ids(list(spo["object"][object_type]))
+ object_label = "O-" + spo["predicate"] + "-" + spo["object_type"][
+ object_type]
+ object_ids = tokenizer.convert_tokens_to_ids(
+ list(spo["object"][object_type]))
entities.append((object_label, object_ids))
-
+
visited = [0] * seq_len
- entities = sorted(entities, key=lambda entity: len(entity[1]), reverse=True)
+ entities = sorted(
+ entities, key=lambda entity: len(entity[1]), reverse=True)
for entity in entities:
entity_label, entity_ids = entity
-
- match_start, match_end = find_entity_with_visited(input_ids, entity_ids, visited)
+
+ match_start, match_end = find_entity_with_visited(
+ input_ids, entity_ids, visited)
if match_start < 0:
match_start, match_end = find_entity(input_ids, entity_ids)
assert match_start >= 0
- for idx in range(match_start, match_end+1):
+ for idx in range(match_start, match_end + 1):
visited[idx] = 1
labels[idx][label2id[entity_label]] = 1
-
+
for idx in range(seq_len):
if sum(labels[idx]) == 0:
labels[idx][0] = 1
return input_ids, token_type_ids, attention_mask, seq_len, labels
-
-
-def convert_example_to_feature2(example, label2id, tokenizer, pad_default_label="O", max_seq_len=512):
+
+
+def convert_example_to_feature2(example,
+ label2id,
+ tokenizer,
+ pad_default_label="O",
+ max_seq_len=512):
# convert word sequence to feature
- features = tokenizer(list(example["text"]), is_split_into_words=True, max_seq_len=max_seq_len, return_length=True, return_attention_mask=True)
- input_ids, token_type_ids, attention_mask, seq_len = features["input_ids"], features["token_type_ids"], features["attention_mask"], features["seq_len"]
+ features = tokenizer(
+ list(example["text"]),
+ is_split_into_words=True,
+ max_seq_len=max_seq_len,
+ return_length=True,
+ return_attention_mask=True)
+ input_ids, token_type_ids, attention_mask, seq_len = features[
+ "input_ids"], features["token_type_ids"], features[
+ "attention_mask"], features["seq_len"]
# construct labels
labels = [[0] * len(label2id) for _ in range(seq_len)]
- spo_list = example["spo_list"] if "spo_list" in example.keys() else []
+ spo_list = example["spo_list"] if "spo_list" in example.keys() else []
for spo in spo_list:
- subject_label = "S-"+spo["predicate"]+"-"+spo["subject_type"]
+ subject_label = "S-" + spo["predicate"] + "-" + spo["subject_type"]
subject_ids = tokenizer.convert_tokens_to_ids(list(spo["subject"]))
entities = [(subject_label, subject_ids)]
for object_type in spo["object_type"]:
- object_label = "O-"+spo["predicate"]+"-"+spo["object_type"][object_type]
- object_ids = tokenizer.convert_tokens_to_ids(list(spo["object"][object_type]))
+ object_label = "O-" + spo["predicate"] + "-" + spo["object_type"][
+ object_type]
+ object_ids = tokenizer.convert_tokens_to_ids(
+ list(spo["object"][object_type]))
entities.append((object_label, object_ids))
visited = [0] * seq_len
- entities = sorted(entities, key=lambda entity: len(entity[1]), reverse=True)
+ entities = sorted(
+ entities, key=lambda entity: len(entity[1]), reverse=True)
for entity in entities:
entity_label, entity_ids = entity
- match_start, match_end = find_entity_with_visited(input_ids, entity_ids, visited)
+ match_start, match_end = find_entity_with_visited(
+ input_ids, entity_ids, visited)
if match_start < 0:
match_start, match_end = find_entity(input_ids, entity_ids)
assert match_start >= 0
- for i, idx in enumerate(range(match_start, match_end+1)):
+ for i, idx in enumerate(range(match_start, match_end + 1)):
visited[idx] = 1
if i == 0:
labels[idx][label2id[entity_label]] = 1
@@ -177,34 +214,49 @@ def convert_example_to_feature2(example, label2id, tokenizer, pad_default_label=
if sum(labels[idx]) == 0:
labels[idx][0] = 1
- return input_ids, token_type_ids, attention_mask, seq_len, labels
+ return input_ids, token_type_ids, attention_mask, seq_len, labels
-def convert_example_to_feature(example, label2id, tokenizer, pad_default_label="O", max_seq_len=512):
+def convert_example_to_feature(example,
+ label2id,
+ tokenizer,
+ pad_default_label="O",
+ max_seq_len=512):
# convert word sequence to feature
- features = tokenizer(list(example["text"]), is_split_into_words=True, max_seq_len=max_seq_len, return_length=True, return_attention_mask=True)
- input_ids, token_type_ids, attention_mask, seq_len = features["input_ids"], features["token_type_ids"], features["attention_mask"], features["seq_len"]
+ features = tokenizer(
+ list(example["text"]),
+ is_split_into_words=True,
+ max_seq_len=max_seq_len,
+ return_length=True,
+ return_attention_mask=True)
+ input_ids, token_type_ids, attention_mask, seq_len = features[
+ "input_ids"], features["token_type_ids"], features[
+ "attention_mask"], features["seq_len"]
# construct labels
labels = [[0] * len(label2id) for _ in range(seq_len)]
- spo_list = example["spo_list"] if "spo_list" in example.keys() else []
+ spo_list = example["spo_list"] if "spo_list" in example.keys() else []
for spo in spo_list:
- subject_label = "S-"+spo["predicate"]+"-"+spo["subject_type"]
+ subject_label = "S-" + spo["predicate"] + "-" + spo["subject_type"]
subject_ids = tokenizer.convert_tokens_to_ids(list(spo["subject"]))
entities = [(subject_label, subject_ids)]
for object_type in spo["object_type"]:
- object_label = "O-"+spo["predicate"]+"-"+spo["object_type"][object_type]
- object_ids = tokenizer.convert_tokens_to_ids(list(spo["object"][object_type]))
+ object_label = "O-" + spo["predicate"] + "-" + spo["object_type"][
+ object_type]
+ object_ids = tokenizer.convert_tokens_to_ids(
+ list(spo["object"][object_type]))
entities.append((object_label, object_ids))
visited = [0] * seq_len
- entities = sorted(entities, key=lambda entity: len(entity[1]), reverse=True)
+ entities = sorted(
+ entities, key=lambda entity: len(entity[1]), reverse=True)
for entity in entities:
entity_label, entity_ids = entity
- match_start, match_end = find_entity_with_visited(input_ids, entity_ids, visited)
-
+ match_start, match_end = find_entity_with_visited(
+ input_ids, entity_ids, visited)
+
if match_start >= 0:
- for i, idx in enumerate(range(match_start, match_end+1)):
+ for i, idx in enumerate(range(match_start, match_end + 1)):
visited[idx] = 1
if i == 0:
labels[idx][label2id[entity_label]] = 1
@@ -215,4 +267,4 @@ def convert_example_to_feature(example, label2id, tokenizer, pad_default_label="
if sum(labels[idx]) == 0:
labels[idx][0] = 1
- return input_ids, token_type_ids, attention_mask, seq_len, labels
+ return input_ids, token_type_ids, attention_mask, seq_len, labels
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/evaluate.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/evaluate.py
old mode 100644
new mode 100755
index 24decd1fa..b2b464f73
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/evaluate.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/evaluate.py
@@ -38,37 +38,41 @@ def collate_fn(batch, pad_default_token_id=0):
max_len = max(seq_len_list)
for idx in range(len(input_ids_list)):
pad_len = max_len - seq_len_list[idx]
- input_ids_list[idx] = input_ids_list[idx] + [pad_default_token_id] * pad_len
+ input_ids_list[idx] = input_ids_list[idx] + [pad_default_token_id
+ ] * pad_len
token_type_ids_list[idx] = token_type_ids_list[idx] + [0] * pad_len
attention_mask_list[idx] = attention_mask_list[idx] + [0] * pad_len
- pad_label = labels_list[idx][0][:] # CLS label
+ pad_label = labels_list[idx][0][:] # CLS label
labels_list[idx] = labels_list[idx] + [pad_label] * pad_len
-    return paddle.to_tensor(input_ids_list), paddle.to_tensor(token_type_ids_list), paddle.to_tensor(attention_mask_list), paddle.to_tensor(seq_len_list), paddle.to_tensor(labels_list)
+    return paddle.to_tensor(input_ids_list), paddle.to_tensor(
+        token_type_ids_list), paddle.to_tensor(
+            attention_mask_list), paddle.to_tensor(
+                seq_len_list), paddle.to_tensor(labels_list)
-
-def evaluate(model, data_loader, metric, examples, reverse_schemas, id2label, batch_size):
+def evaluate(model, data_loader, metric, examples, reverse_schemas, id2label,
+ batch_size):
model.eval()
metric.reset()
for idx, batch_data in tqdm(enumerate(data_loader)):
input_ids, token_type_ids, attention_masks, seq_lens, labels = batch_data
- logits = model(input_ids, token_type_ids=token_type_ids)
+ logits = model(input_ids, token_type_ids=token_type_ids)
# decoding logits into examples with spo_list
- batch_examples = examples[idx*batch_size : (idx+1)*batch_size]
- batch_pred_examples = decoding(batch_examples, reverse_schemas, logits, seq_lens, id2label)
+ batch_examples = examples[idx * batch_size:(idx + 1) * batch_size]
+ batch_pred_examples = decoding(batch_examples, reverse_schemas, logits,
+ seq_lens, id2label)
# count metric
metric.update(batch_examples, batch_pred_examples)
-
precision, recall, f1 = metric.accumulate()
return precision, recall, f1
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default=None, help="model path that you saved")
@@ -88,10 +92,10 @@ def evaluate(model, data_loader, metric, examples, reverse_schemas, id2label, ba
model_name = "ernie-1.0"
label2id, id2label = load_dict(args.save_label_path)
reverse_schema = load_reverse_schema(args.save_schema_path)
-
+
test_ds = load_dataset(read, data_path=args.test_path, lazy=False)
- examples = copy.deepcopy(test_ds)
-
+ examples = copy.deepcopy(test_ds)
+
tokenizer = ErnieTokenizer.from_pretrained(model_name)
trans_func = partial(convert_example_to_feature, tokenizer=tokenizer, label2id=label2id, pad_default_label="O", max_seq_len=args.max_seq_len)
test_ds = test_ds.map(trans_func, lazy=False)
@@ -103,12 +107,11 @@ def evaluate(model, data_loader, metric, examples, reverse_schemas, id2label, ba
# load model
loaded_state_dict = paddle.load(args.model_path)
ernie = ErnieModel.from_pretrained(model_name)
- model = ErnieForTokenClassification(ernie, num_classes=len(label2id))
+ model = ErnieForTokenClassification(ernie, num_classes=len(label2id))
model.load_dict(loaded_state_dict)
metric = SPOMetric()
-
+
    # evaluate on dev data
precision, recall, f1 = evaluate(model, test_loader, metric, examples, reverse_schema, id2label, args.batch_size)
    print(f'evaluation result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1:.5f}')
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/model.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/model.py
old mode 100644
new mode 100755
index 9a78097b4..1d24df25c
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/model.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/model.py
@@ -12,24 +12,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
from paddle import nn
import paddle.nn.functional as F
class ErnieForTokenClassification(paddle.nn.Layer):
-
def __init__(self, ernie, num_classes, dropout=None):
super(ErnieForTokenClassification, self).__init__()
self.num_classes = num_classes
self.ernie = ernie
- self.dropout = nn.Dropout(dropout if dropout is not None else self.ernie.config["hidden_dropout_prob"])
- self.classifier = nn.Linear(self.ernie.config["hidden_size"], num_classes)
+ self.dropout = nn.Dropout(dropout if dropout is not None else
+ self.ernie.config["hidden_dropout_prob"])
+ self.classifier = nn.Linear(self.ernie.config["hidden_size"],
+ num_classes)
- def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None):
- sequence_output, _ = self.ernie(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask)
+ def forward(self,
+ input_ids,
+ token_type_ids=None,
+ position_ids=None,
+ attention_mask=None):
+ sequence_output, _ = self.ernie(
+ input_ids,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ attention_mask=attention_mask)
sequence_output = self.dropout(sequence_output)
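+        # per-token logits over all S-/O- labels; a token may carry several labels at once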
logits = self.classifier(sequence_output)
-
+
return logits
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/predict.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/predict.py
old mode 100644
new mode 100755
index fd95ad532..abfeb6e8a
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/predict.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import copy
import json
import argparse
@@ -38,22 +37,27 @@ def predict(model, tokenizer, reverse_schemas, id2label):
# processing input text
splited_input_text = list(input_text)
- features = tokenizer(splited_input_text, is_split_into_words=True, max_seq_len=args.max_seq_len, return_length=True)
+ features = tokenizer(
+ splited_input_text,
+ is_split_into_words=True,
+ max_seq_len=args.max_seq_len,
+ return_length=True)
input_ids = paddle.to_tensor(features["input_ids"]).unsqueeze(0)
- token_type_ids = paddle.to_tensor(features["token_type_ids"]).unsqueeze(0)
+ token_type_ids = paddle.to_tensor(features[
+ "token_type_ids"]).unsqueeze(0)
seq_lens = paddle.to_tensor([features["seq_len"]])
# predict by model and decoding result
logits = model(input_ids, token_type_ids=token_type_ids)
- examples = [{"text":input_text}]
- pred_examples = decoding(examples, reverse_schema, logits, seq_lens, id2label)
+ examples = [{"text": input_text}]
+ pred_examples = decoding(examples, reverse_schema, logits, seq_lens,
+ id2label)
# print pred_examples
print(json.dumps(pred_examples, indent=4, ensure_ascii=False))
-
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default=None, help="model path that you saved")
@@ -77,9 +81,8 @@ def predict(model, tokenizer, reverse_schemas, id2label):
# load model
loaded_state_dict = paddle.load(args.model_path)
ernie = ErnieModel.from_pretrained(model_name)
- model = ErnieForTokenClassification(ernie, num_classes=len(label2id))
+ model = ErnieForTokenClassification(ernie, num_classes=len(label2id))
model.load_dict(loaded_state_dict)
# predicting
predict(model, tokenizer, reverse_schema, id2label)
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/run_evaluate.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/run_evaluate.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/run_predict.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/run_predict.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/run_train.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/run_train.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/train.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/train.py
old mode 100644
new mode 100755
index 0df6d5320..8bbbe445d
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/train.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import copy
import argparse
@@ -67,29 +66,32 @@ def collate_fn(batch, pad_default_token_id=0):
max_len = max(seq_len_list)
for idx in range(len(input_ids_list)):
pad_len = max_len - seq_len_list[idx]
- input_ids_list[idx] = input_ids_list[idx] + [pad_default_token_id] * pad_len
+ input_ids_list[idx] = input_ids_list[idx] + [pad_default_token_id
+ ] * pad_len
token_type_ids_list[idx] = token_type_ids_list[idx] + [0] * pad_len
- attention_mask_list[idx] = attention_mask_list[idx] + [0] * pad_len
-
- pad_label = labels_list[idx][0][:] # CLS label
- labels_list[idx] = labels_list[idx] + [pad_label] * pad_len
+ attention_mask_list[idx] = attention_mask_list[idx] + [0] * pad_len
+ pad_label = labels_list[idx][0][:] # CLS label
+ labels_list[idx] = labels_list[idx] + [pad_label] * pad_len
- return paddle.to_tensor(input_ids_list), paddle.to_tensor(token_type_ids_list), paddle.to_tensor(attention_mask_list), paddle.to_tensor(seq_len_list), paddle.to_tensor(labels_list)
+ return paddle.to_tensor(input_ids_list), paddle.to_tensor(
+ token_type_ids_list), paddle.to_tensor(
+ attention_mask_list), paddle.to_tensor(
+ seq_len_list), paddle.to_tensor(labels_list)
class DuIELoss(paddle.nn.Layer):
def __init__(self):
super(DuIELoss, self).__init__()
self.criterion = paddle.nn.BCEWithLogitsLoss(reduction="none")
-
+
def forward(self, logits, labels, masks):
labels = paddle.cast(labels, "float32")
loss = self.criterion(logits, labels)
mask = paddle.cast(masks, "float32")
loss = loss * mask.unsqueeze(-1)
loss = paddle.sum(loss.mean(axis=2), axis=1) / paddle.sum(mask, axis=1)
-
+
return loss.mean()
@@ -112,33 +114,53 @@ def train():
dev_examples = copy.deepcopy(dev_ds)
tokenizer = ErnieTokenizer.from_pretrained(model_name)
- trans_func = partial(convert_example_to_feature, tokenizer=tokenizer, label2id=label2id, pad_default_label="O", max_seq_len=args.max_seq_len)
+ trans_func = partial(
+ convert_example_to_feature,
+ tokenizer=tokenizer,
+ label2id=label2id,
+ pad_default_label="O",
+ max_seq_len=args.max_seq_len)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
-
+
    # Warning: do not set shuffle=True for dev_batch_sampler; evaluate() matches batches to dev_examples by index
- train_batch_sampler = paddle.io.BatchSampler(train_ds, batch_size=args.batch_size, shuffle=True)
- dev_batch_sampler = paddle.io.BatchSampler(dev_ds, batch_size=args.batch_size, shuffle=False)
- train_loader = paddle.io.DataLoader(dataset=train_ds, batch_sampler=train_batch_sampler, collate_fn=collate_fn)
- dev_loader = paddle.io.DataLoader(dataset=dev_ds, batch_sampler=dev_batch_sampler, collate_fn=collate_fn)
+ train_batch_sampler = paddle.io.BatchSampler(
+ train_ds, batch_size=args.batch_size, shuffle=True)
+ dev_batch_sampler = paddle.io.BatchSampler(
+ dev_ds, batch_size=args.batch_size, shuffle=False)
+ train_loader = paddle.io.DataLoader(
+ dataset=train_ds,
+ batch_sampler=train_batch_sampler,
+ collate_fn=collate_fn)
+ dev_loader = paddle.io.DataLoader(
+ dataset=dev_ds, batch_sampler=dev_batch_sampler, collate_fn=collate_fn)
# configure model training
ernie = ErnieModel.from_pretrained(model_name)
model = ErnieForTokenClassification(ernie, num_classes=len(label2id))
num_training_steps = len(train_loader) * args.num_epoch
- lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps, args.warmup_proportion)
- decay_params = [p.name for n, p in model.named_parameters() if not any(nd in n for nd in ["bias", "norm"])]
+ lr_scheduler = LinearDecayWithWarmup(
+ args.learning_rate, num_training_steps, args.warmup_proportion)
+ decay_params = [
+ p.name for n, p in model.named_parameters()
+ if not any(nd in n for nd in ["bias", "norm"])
+ ]
grad_clip = paddle.nn.ClipGradByGlobalNorm(args.max_grad_norm)
- optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler, parameters=model.parameters(), weight_decay=args.weight_decay, apply_decay_param_fun=lambda x: x in decay_params, grad_clip=grad_clip)
+ optimizer = paddle.optimizer.AdamW(
+ learning_rate=lr_scheduler,
+ parameters=model.parameters(),
+ weight_decay=args.weight_decay,
+ apply_decay_param_fun=lambda x: x in decay_params,
+ grad_clip=grad_clip)
criterion = DuIELoss()
metric = SPOMetric()
# start to train joint_model
global_step, best_f1 = 0, 0.
model.train()
- for epoch in range(1, args.num_epoch+1):
+ for epoch in range(1, args.num_epoch + 1):
for idx, batch_data in enumerate(train_loader()):
input_ids, token_type_ids, attention_masks, seq_lens, labels = batch_data
logits = model(input_ids, token_type_ids=token_type_ids)
@@ -151,25 +173,33 @@ def train():
optimizer.clear_grad()
if global_step > 0 and global_step % args.log_step == 0:
- print(f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}")
+ print(
+ f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}"
+ )
if global_step > 0 and global_step % args.eval_step == 0:
- precision, recall, f1 = evaluate(model, dev_loader, metric, dev_examples, reverse_schema, id2label, args.batch_size)
+ precision, recall, f1 = evaluate(model, dev_loader, metric,
+ dev_examples, reverse_schema,
+ id2label, args.batch_size)
model.train()
if f1 > best_f1:
- print(f"best F1 performence has been updated: {best_f1:.5f} --> {f1:.5f}")
+ print(
+ f"best F1 performence has been updated: {best_f1:.5f} --> {f1:.5f}"
+ )
best_f1 = f1
- paddle.save(model.state_dict(), f"{args.checkpoint}/best.pdparams")
- print(f'evalution result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1:.5f}')
+ paddle.save(model.state_dict(),
+ f"{args.checkpoint}/best.pdparams")
+ print(
+                    f'evaluation result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1:.5f}'
+ )
global_step += 1
paddle.save(model.state_dict(), f"{args.checkpoint}/final.pdparams")
-
-if __name__=="__main__":
+
+if __name__ == "__main__":
# generate label and schema dict
- generate_dict(args.ori_schema_path, args.save_label_path, args.save_schema_path)
+ generate_dict(args.ori_schema_path, args.save_label_path,
+ args.save_schema_path)
# train model
train()
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/extract_chinese_and_punctuation.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/extract_chinese_and_punctuation.py
old mode 100644
new mode 100755
index c17d5d8d1..ae8e23fcd
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/extract_chinese_and_punctuation.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/extract_chinese_and_punctuation.py
@@ -20,7 +20,8 @@
[0x2E80, 0x2E99], # Han # So [26] CJK RADICAL REPEAT, CJK RADICAL RAP
[0x2E9B, 0x2EF3
], # Han # So [89] CJK RADICAL CHOKE, CJK RADICAL C-SIMPLIFIED TURTLE
- [0x2F00, 0x2FD5], # Han # So [214] KANGXI RADICAL ONE, KANGXI RADICAL FLUTE
+ [0x2F00,
+ 0x2FD5], # Han # So [214] KANGXI RADICAL ONE, KANGXI RADICAL FLUTE
0x3005, # Han # Lm IDEOGRAPHIC ITERATION MARK
0x3007, # Han # Nl IDEOGRAPHIC NUMBER ZERO
[0x3021,
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/metric.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/metric.py
old mode 100644
new mode 100755
index 49eb6cf1e..1b4684ac2
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/metric.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/metric.py
@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
class SPOMetric(paddle.metric.Metric):
-
def __init__(self):
super(SPOMetric, self).__init__()
self.correct_count = 0.
@@ -25,7 +23,8 @@ def __init__(self):
self.recall_count = 0.
def update(self, batch_examples, batch_pred_examples):
- for pred_example, golden_example in zip(batch_pred_examples, batch_examples):
+ for pred_example, golden_example in zip(batch_pred_examples,
+ batch_examples):
pred_spo_list = self._del_duplicate(pred_example["spo_list"])
golden_spo_list = golden_example["spo_list"]
@@ -38,7 +37,9 @@ def update(self, batch_examples, batch_pred_examples):
def accumulate(self):
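+        # micro-averaged scores over exact (subject, predicate, object) matches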
precision_score = self.correct_count / self.predict_count if self.predict_count > 0 else 0.
recall_score = self.correct_count / self.recall_count if self.recall_count > 0 else 0.
- f1_score = (2 * precision_score * recall_score) / (precision_score + recall_score) if (precision_score + recall_score) > 0 else 0.
+ f1_score = (2 * precision_score * recall_score) / (
+ precision_score + recall_score) if (precision_score + recall_score
+ ) > 0 else 0.
return precision_score, recall_score, f1_score
@@ -63,5 +64,3 @@ def _is_spo_in_list(self, spo, spo_list):
if spo in spo_list:
return True
return False
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/utils.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/utils.py
old mode 100644
new mode 100755
index 8c50e4a13..08a9d7450
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/relation_extraction/ernie/utils/utils.py
@@ -12,13 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import copy
import paddle
import random
import numpy as np
from collections import defaultdict
+
def set_seed(seed):
paddle.seed(seed)
random.seed(seed)
@@ -26,7 +26,8 @@ def set_seed(seed):
def get_object_keyname(reverse_schema, predicate, object_valname):
- object_type_keyname = reverse_schema[predicate]["object_type"][object_valname]
+ object_type_keyname = reverse_schema[predicate]["object_type"][
+ object_valname]
return object_type_keyname
@@ -36,28 +37,28 @@ def _decoding_by_label(label_logits):
last_yes = False
while i < lens:
if label_logits[i] == 0:
- last_yes=False
+ last_yes = False
else:
if last_yes:
- if i==0:
+ if i == 0:
labels.append([])
labels[-1].append(i)
else:
labels.append([])
labels[-1].append(i)
- last_yes=True
+ last_yes = True
i += 1
return labels
-
-
+
+
def parsing_entity(label_logits, label_I_logits):
- token_sids = np.argwhere(label_logits==1).squeeze(-1)
+ token_sids = np.argwhere(label_logits == 1).squeeze(-1)
extract_results = []
for token_sid in token_sids:
extract_result = [token_sid]
cursor = token_sid + 1
-            logits = logits[1:(seq_len-1)]
-            logits[logits>=0.5] = 1
-            logits[logits<0.5] = 0
+ logits = logits[1:(seq_len - 1)]
+ logits[logits >= 0.5] = 1
+ logits[logits < 0.5] = 0
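+            # after stripping [CLS]/[SEP], there is one binary label row per original character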
assert len(logits) == len(example_text)
@@ -87,10 +90,16 @@ def decoding(examples, reverse_schema, batch_logits, batch_seq_len, id2label):
if parsing_results and s_o_indicator == "S":
for parsing_result in parsing_results:
- extract_subjects[predicate].append({"subject_ids":parsing_result, "subject_type":s_o_type})
+ extract_subjects[predicate].append({
+ "subject_ids": parsing_result,
+ "subject_type": s_o_type
+ })
elif parsing_results and s_o_indicator == "O":
for parsing_result in parsing_results:
- extract_objects[predicate].append({"object_ids": parsing_result, "object_type": s_o_type})
+ extract_objects[predicate].append({
+ "object_ids": parsing_result,
+ "object_type": s_o_type
+ })
# convert result to spo format
for predicate in extract_subjects.keys():
@@ -98,18 +107,27 @@ def decoding(examples, reverse_schema, batch_logits, batch_seq_len, id2label):
continue
for subject_result in extract_subjects[predicate]:
- subject_ids, subject_type = subject_result["subject_ids"], subject_result["subject_type"]
- subject = example_text[subject_ids[0]:subject_ids[-1]+1]
- spo = {"predicate":predicate, "subject": subject, "subject_type":subject_type, "object":{}, "object_type":{}}
+ subject_ids, subject_type = subject_result[
+ "subject_ids"], subject_result["subject_type"]
+ subject = example_text[subject_ids[0]:subject_ids[-1] + 1]
+ spo = {
+ "predicate": predicate,
+ "subject": subject,
+ "subject_type": subject_type,
+ "object": {},
+ "object_type": {}
+ }
for object_result in extract_objects[predicate]:
- object_ids, object_type = object_result["object_ids"], object_result["object_type"]
- object = example_text[object_ids[0]:object_ids[-1]+1]
- object_type_keyname = get_object_keyname(reverse_schema, predicate, object_type)
+ object_ids, object_type = object_result[
+ "object_ids"], object_result["object_type"]
+ object = example_text[object_ids[0]:object_ids[-1] + 1]
+ object_type_keyname = get_object_keyname(
+ reverse_schema, predicate, object_type)
spo["object"][object_type_keyname] = object
spo["object_type"][object_type_keyname] = object_type
spo_list.append(spo)
- example = {"text": example_text, "spo_list":spo_list}
+ example = {"text": example_text, "spo_list": spo_list}
batch_pred_examples.append(example)
-
+
return batch_pred_examples
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/README.md b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/model.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/model.py
old mode 100644
new mode 100755
index b2dc07c94..830bf97e9
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/model.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/model.py
@@ -12,20 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import paddle.nn as nn
+
class SkepForSquenceClassification(paddle.nn.Layer):
def __init__(self, skep, num_classes=2, dropout=None):
super(SkepForSquenceClassification, self).__init__()
self.num_classes = num_classes
self.skep = skep
- self.dropout = nn.Dropout(p=dropout if dropout is not None else self.skep.config["hidden_dropout_prob"])
- self.classifier = nn.Linear(self.skep.config["hidden_size"], self.num_classes)
+ self.dropout = nn.Dropout(p=dropout if dropout is not None else
+ self.skep.config["hidden_dropout_prob"])
+ self.classifier = nn.Linear(self.skep.config["hidden_size"],
+ self.num_classes)
- def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None):
- _, pooled_output = self.skep(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask)
+ def forward(self,
+ input_ids,
+ token_type_ids=None,
+ position_ids=None,
+ attention_mask=None):
+ _, pooled_output = self.skep(
+ input_ids,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ attention_mask=attention_mask)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/predict.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/predict.py
old mode 100644
new mode 100755
index f8d932e9e..545375e1b
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/predict.py
@@ -23,21 +23,23 @@
def predict(text, text_pair, model, tokenizer, id2label, max_seq_len=256):
model.eval()
-
+
# processing input text
- encoded_inputs = tokenizer(text=text, text_pair=text_pair, max_seq_len=max_seq_len)
+ encoded_inputs = tokenizer(
+ text=text, text_pair=text_pair, max_seq_len=max_seq_len)
input_ids = paddle.to_tensor([encoded_inputs["input_ids"]])
token_type_ids = paddle.to_tensor([encoded_inputs["token_type_ids"]])
# predict by model and decoding result
logits = model(input_ids, token_type_ids=token_type_ids)
- label_id = paddle.argmax(logits, axis=1).numpy()[0]
+ label_id = paddle.argmax(logits, axis=1).numpy()[0]
# print predict result
- print(f"text: {text} \ntext_pair:{text_pair} \nlabel: {id2label[label_id]}")
+ print(
+ f"text: {text} \ntext_pair:{text_pair} \nlabel: {id2label[label_id]}")
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default=None, help="model path that you saved")
@@ -54,11 +56,10 @@ def predict(text, text_pair, model, tokenizer, id2label, max_seq_len=256):
# load model
loaded_state_dict = paddle.load(args.model_path)
ernie = SkepModel.from_pretrained(model_name)
- model = SkepForSquenceClassification(ernie, num_classes=2)
+ model = SkepForSquenceClassification(ernie, num_classes=2)
model.load_dict(loaded_state_dict)
-
+
# predict with model
text = "display#quality"
text_pair = "mk16i用后的体验感觉不错,就是有点厚,屏幕分辨率高,运行流畅,就是不知道能不能刷4.0的系统啊"
predict(text, text_pair, model, tokenizer, id2label, max_seq_len=args.max_seq_len)
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/run_predict.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/run_predict.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/run_train.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/run_train.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/train.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/train.py
old mode 100644
new mode 100755
index d1d742707..c5a46506f
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/train.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import copy
import argparse
@@ -30,7 +29,6 @@
warnings.filterwarnings("ignore")
-
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=3, help="Number of epoches for fine-tuning.")
@@ -63,7 +61,10 @@ def train():
train_ds = load_dataset("seabsa16", "phns", splits=["train"])
tokenizer = SkepTokenizer.from_pretrained(model_name)
- trans_func = partial(convert_example_to_feature, tokenizer=tokenizer, max_seq_len=args.max_seq_len)
+ trans_func = partial(
+ convert_example_to_feature,
+ tokenizer=tokenizer,
+ max_seq_len=args.max_seq_len)
train_ds = train_ds.map(trans_func, lazy=False)
batchify_fn = lambda samples, fn=Tuple(
@@ -72,23 +73,37 @@ def train():
Stack(dtype="int64")
): fn(samples)
- train_batch_sampler = paddle.io.BatchSampler(train_ds, batch_size=args.batch_size, shuffle=True)
- train_loader = paddle.io.DataLoader(train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn)
+ train_batch_sampler = paddle.io.BatchSampler(
+ train_ds, batch_size=args.batch_size, shuffle=True)
+ train_loader = paddle.io.DataLoader(
+ train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn)
# configure model training
skep = SkepModel.from_pretrained(model_name)
- model = SkepForSquenceClassification(skep, num_classes=len(train_ds.label_list))
-
+ model = SkepForSquenceClassification(
+ skep, num_classes=len(train_ds.label_list))
+
num_training_steps = len(train_loader) * args.num_epoch
- lr_scheduler = LinearDecayWithWarmup(learning_rate=args.learning_rate, total_steps=num_training_steps, warmup=args.warmup_proportion)
- decay_params = [p.name for n, p in model.named_parameters() if not any(nd in n for nd in ["bias", "norm"])]
+ lr_scheduler = LinearDecayWithWarmup(
+ learning_rate=args.learning_rate,
+ total_steps=num_training_steps,
+ warmup=args.warmup_proportion)
+ decay_params = [
+ p.name for n, p in model.named_parameters()
+ if not any(nd in n for nd in ["bias", "norm"])
+ ]
grad_clip = paddle.nn.ClipGradByGlobalNorm(args.max_grad_norm)
- optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler, parameters=model.parameters(), weight_decay=args.weight_decay, apply_decay_param_fun=lambda x: x in decay_params, grad_clip=grad_clip)
+ optimizer = paddle.optimizer.AdamW(
+ learning_rate=lr_scheduler,
+ parameters=model.parameters(),
+ weight_decay=args.weight_decay,
+ apply_decay_param_fun=lambda x: x in decay_params,
+ grad_clip=grad_clip)
# start to train model
global_step = 1
model.train()
- for epoch in range(1, args.num_epoch+1):
+ for epoch in range(1, args.num_epoch + 1):
for batch_data in train_loader():
input_ids, token_type_ids, labels = batch_data
logits = model(input_ids, token_type_ids=token_type_ids)
@@ -100,17 +115,14 @@ def train():
optimizer.clear_grad()
if global_step > 0 and global_step % args.log_step == 0:
- print(f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}")
-
+ print(
+ f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}"
+ )
+
global_step += 1
paddle.save(model.state_dict(), f"{args.checkpoint}/final.pdparams")
-if __name__=="__main__":
+if __name__ == "__main__":
train()
-
-
-
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/utils/data.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/utils/data.py
old mode 100644
new mode 100755
index 1a06d8bf7..043bd7569
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/utils/data.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/utils/data.py
@@ -12,14 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import numpy as np
-def convert_example_to_feature(example, tokenizer, max_seq_len=512, is_test=False):
- encoded_inputs = tokenizer(text=example["text"], text_pair=example["text_pair"], max_seq_len=max_seq_len)
+
+def convert_example_to_feature(example,
+ tokenizer,
+ max_seq_len=512,
+ is_test=False):
+ encoded_inputs = tokenizer(
+ text=example["text"],
+ text_pair=example["text_pair"],
+ max_seq_len=max_seq_len)
if not is_test:
labels = np.array(example["label"], dtype="int64")
- return encoded_inputs["input_ids"], encoded_inputs["token_type_ids"], labels
+ return encoded_inputs["input_ids"], encoded_inputs[
+ "token_type_ids"], labels
else:
- return encoded_inputs["input_ids"], encoded_inputs["token_type_ids"]
\ No newline at end of file
+ return encoded_inputs["input_ids"], encoded_inputs["token_type_ids"]
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/utils/utils.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/utils/utils.py
old mode 100644
new mode 100755
index dafdba1f1..3da36391e
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/utils/utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_aspect_level/utils/utils.py
@@ -12,13 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import copy
import paddle
import random
import numpy as np
from collections import defaultdict
+
def set_seed(seed):
paddle.seed(seed)
random.seed(seed)
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/README.md b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/README.md
old mode 100644
new mode 100755
index 8eb7430d3..816728f22
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/README.md
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/README.md
@@ -4,7 +4,7 @@
一般来讲,被人们所熟知的情感分析任务是语句级别的情感分析,其主要分析一段文本中整体蕴含的情感色彩。其常用于电影评论分析、网络论坛舆情分析等场景,如下面这句话所示。
-> 15.4寸笔记本的键盘确实爽,基本跟台式机差不多了,蛮喜欢数字小键盘,输数字特方便,样子也很美观,做工也相当不错 1
+> 15.4寸笔记本的键盘确实爽,基本跟台式机差不多了,蛮喜欢数字小键盘,输数字特方便,样子也很美观,做工也相当不错 1
为方便语句级别的情感分析任务建模,可将情感极性分为正向、负向、中性三个类别,这样将情感分析任务转变为一个分类问题,如图1所示:
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/evaluate.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/evaluate.py
old mode 100644
new mode 100755
index 51f8dc31c..19c56ea8f
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/evaluate.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/evaluate.py
@@ -24,13 +24,14 @@
from model import SkepForSquenceClassification
from utils.data import convert_example_to_feature
+
def evaluate(model, data_loader, metric):
model.eval()
metric.reset()
for idx, batch_data in tqdm(enumerate(data_loader)):
input_ids, token_type_ids, labels = batch_data
- logits = model(input_ids, token_type_ids=token_type_ids)
+ logits = model(input_ids, token_type_ids=token_type_ids)
# count metric
correct = metric.compute(logits, labels)
@@ -41,7 +42,7 @@ def evaluate(model, data_loader, metric):
return accuracy, precision, recall, f1
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default=None, help="model path that you saved")
@@ -70,12 +71,11 @@ def evaluate(model, data_loader, metric):
# load model
loaded_state_dict = paddle.load(args.model_path)
ernie = SkepModel.from_pretrained(model_name)
- model = SkepForSquenceClassification(ernie, num_classes=len(test_ds.label_list))
+ model = SkepForSquenceClassification(ernie, num_classes=len(test_ds.label_list))
model.load_dict(loaded_state_dict)
metric = AccuracyAndF1()
-
+
# evalute on dev data
accuracy, precision, recall, f1 = evaluate(model, test_loader, metric)
print(f'evalution result: accuracy: {accuracy}, precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1:.5f}')
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/model.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/model.py
old mode 100644
new mode 100755
index b2dc07c94..830bf97e9
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/model.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/model.py
@@ -12,20 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import paddle.nn as nn
+
class SkepForSquenceClassification(paddle.nn.Layer):
def __init__(self, skep, num_classes=2, dropout=None):
super(SkepForSquenceClassification, self).__init__()
self.num_classes = num_classes
self.skep = skep
- self.dropout = nn.Dropout(p=dropout if dropout is not None else self.skep.config["hidden_dropout_prob"])
- self.classifier = nn.Linear(self.skep.config["hidden_size"], self.num_classes)
+ self.dropout = nn.Dropout(p=dropout if dropout is not None else
+ self.skep.config["hidden_dropout_prob"])
+ self.classifier = nn.Linear(self.skep.config["hidden_size"],
+ self.num_classes)
- def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None):
- _, pooled_output = self.skep(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask)
+ def forward(self,
+ input_ids,
+ token_type_ids=None,
+ position_ids=None,
+ attention_mask=None):
+ _, pooled_output = self.skep(
+ input_ids,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ attention_mask=attention_mask)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/predict.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/predict.py
old mode 100644
new mode 100755
index 63a0c5d7b..9daea3079
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/predict.py
@@ -29,7 +29,7 @@ def predict(model, tokenizer, id2label, max_seq_len=256):
continue
if input_text == "quit":
break
-
+
# processing input text
encoded_inputs = tokenizer(input_text, max_seq_len=max_seq_len)
input_ids = paddle.to_tensor([encoded_inputs["input_ids"]])
@@ -37,12 +37,12 @@ def predict(model, tokenizer, id2label, max_seq_len=256):
# predict by model and decoding result
logits = model(input_ids, token_type_ids=token_type_ids)
- label_id = paddle.argmax(logits, axis=1).numpy()[0]
+ label_id = paddle.argmax(logits, axis=1).numpy()[0]
print(f"label: {id2label[label_id]}")
-if __name__=="__main__":
+if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default=None, help="model path that you saved")
@@ -59,9 +59,8 @@ def predict(model, tokenizer, id2label, max_seq_len=256):
# load model
loaded_state_dict = paddle.load(args.model_path)
ernie = SkepModel.from_pretrained(model_name)
- model = SkepForSquenceClassification(ernie, num_classes=2)
+ model = SkepForSquenceClassification(ernie, num_classes=2)
model.load_dict(loaded_state_dict)
-
+
# predict with model
predict(model, tokenizer, id2label, max_seq_len=args.max_seq_len)
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/run_evaluate.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/run_evaluate.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/run_predict.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/run_predict.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/run_train.sh b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/run_train.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/train.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/train.py
old mode 100644
new mode 100755
index 16a18264d..f07566245
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/train.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/train.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import copy
import argparse
@@ -32,7 +31,6 @@
warnings.filterwarnings("ignore")
-
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=3, help="Number of epoches for fine-tuning.")
@@ -66,7 +64,10 @@ def train():
train_ds, dev_ds = load_dataset("chnsenticorp", splits=["train", "dev"])
tokenizer = SkepTokenizer.from_pretrained(model_name)
- trans_func = partial(convert_example_to_feature, tokenizer=tokenizer, max_seq_len=args.max_seq_len)
+ trans_func = partial(
+ convert_example_to_feature,
+ tokenizer=tokenizer,
+ max_seq_len=args.max_seq_len)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
@@ -76,27 +77,43 @@ def train():
Stack(dtype="int64")
): fn(samples)
- train_batch_sampler = paddle.io.BatchSampler(train_ds, batch_size=args.batch_size, shuffle=True)
- dev_batch_sampler = paddle.io.BatchSampler(dev_ds, batch_size=args.batch_size, shuffle=False)
- train_loader = paddle.io.DataLoader(train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn)
- dev_loader = paddle.io.DataLoader(dev_ds, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn)
+ train_batch_sampler = paddle.io.BatchSampler(
+ train_ds, batch_size=args.batch_size, shuffle=True)
+ dev_batch_sampler = paddle.io.BatchSampler(
+ dev_ds, batch_size=args.batch_size, shuffle=False)
+ train_loader = paddle.io.DataLoader(
+ train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn)
+ dev_loader = paddle.io.DataLoader(
+ dev_ds, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn)
# configure model training
skep = SkepModel.from_pretrained(model_name)
- model = SkepForSquenceClassification(skep, num_classes=len(train_ds.label_list))
-
+ model = SkepForSquenceClassification(
+ skep, num_classes=len(train_ds.label_list))
+
num_training_steps = len(train_loader) * args.num_epoch
- lr_scheduler = LinearDecayWithWarmup(learning_rate=args.learning_rate, total_steps=num_training_steps, warmup=args.warmup_proportion)
- decay_params = [p.name for n, p in model.named_parameters() if not any(nd in n for nd in ["bias", "norm"])]
+ lr_scheduler = LinearDecayWithWarmup(
+ learning_rate=args.learning_rate,
+ total_steps=num_training_steps,
+ warmup=args.warmup_proportion)
+ decay_params = [
+ p.name for n, p in model.named_parameters()
+ if not any(nd in n for nd in ["bias", "norm"])
+ ]
grad_clip = paddle.nn.ClipGradByGlobalNorm(args.max_grad_norm)
- optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler, parameters=model.parameters(), weight_decay=args.weight_decay, apply_decay_param_fun=lambda x: x in decay_params, grad_clip=grad_clip)
+ optimizer = paddle.optimizer.AdamW(
+ learning_rate=lr_scheduler,
+ parameters=model.parameters(),
+ weight_decay=args.weight_decay,
+ apply_decay_param_fun=lambda x: x in decay_params,
+ grad_clip=grad_clip)
metric = AccuracyAndF1()
# start to train model
global_step, best_f1 = 1, 0.
model.train()
- for epoch in range(1, args.num_epoch+1):
+ for epoch in range(1, args.num_epoch + 1):
for batch_data in train_loader():
input_ids, token_type_ids, labels = batch_data
logits = model(input_ids, token_type_ids=token_type_ids)
@@ -108,25 +125,29 @@ def train():
optimizer.clear_grad()
if global_step > 0 and global_step % args.log_step == 0:
- print(f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}")
- if (global_step > 0 and global_step % args.eval_step == 0) or global_step == num_training_steps:
- accuracy, precision, recall, f1 = evaluate(model, dev_loader, metric)
+ print(
+ f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}"
+ )
+ if (global_step > 0 and global_step % args.eval_step == 0
+ ) or global_step == num_training_steps:
+ accuracy, precision, recall, f1 = evaluate(model, dev_loader,
+ metric)
model.train()
if f1 > best_f1:
- print(f"best F1 performence has been updated: {best_f1:.5f} --> {f1:.5f}")
+ print(
+                        f"best F1 performance has been updated: {best_f1:.5f} --> {f1:.5f}"
+ )
best_f1 = f1
- paddle.save(model.state_dict(), f"{args.checkpoint}/best.pdparams")
- print(f'evalution result: accuracy: {accuracy:.5f}, precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1:.5f}')
+ paddle.save(model.state_dict(),
+ f"{args.checkpoint}/best.pdparams")
+ print(
+                f'evaluation result: accuracy: {accuracy:.5f}, precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1:.5f}'
+ )
global_step += 1
paddle.save(model.state_dict(), f"{args.checkpoint}/final.pdparams")
-if __name__=="__main__":
+if __name__ == "__main__":
train()
-
-
-
-
-
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/utils/data.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/utils/data.py
old mode 100644
new mode 100755
index 71b9d13d0..d82757141
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/utils/data.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/utils/data.py
@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import numpy as np
+
def convert_example_to_feature(example, tokenizer, max_seq_len=512):
encoded_inputs = tokenizer(example["text"], max_seq_len=max_seq_len)
labels = np.array(example["label"], dtype="int64")
- return encoded_inputs["input_ids"], encoded_inputs["token_type_ids"], labels
\ No newline at end of file
+ return encoded_inputs["input_ids"], encoded_inputs["token_type_ids"], labels
diff --git a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/utils/utils.py b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/utils/utils.py
old mode 100644
new mode 100755
index dafdba1f1..3da36391e
--- a/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/utils/utils.py
+++ b/Paddle_Industry_Practice_Sample_Library/nlp_projects/sentiment_analysis/skep_sentence_level/utils/utils.py
@@ -12,13 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import copy
import paddle
import random
import numpy as np
from collections import defaultdict
+
def set_seed(seed):
paddle.seed(seed)
random.seed(seed)
diff --git a/Paddle_Industry_Practice_Sample_Library/paddlex_steel_defect_seg-master/HRNet.md b/Paddle_Industry_Practice_Sample_Library/paddlex_steel_defect_seg-master/HRNet.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/paddlex_steel_defect_seg-master/UNet.md b/Paddle_Industry_Practice_Sample_Library/paddlex_steel_defect_seg-master/UNet.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/paddlex_steel_defect_seg-master/images/csharp.png b/Paddle_Industry_Practice_Sample_Library/paddlex_steel_defect_seg-master/images/csharp.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/rebar_count/images/android.png b/Paddle_Industry_Practice_Sample_Library/rebar_count/images/android.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/robot_grab/code/point.py b/Paddle_Industry_Practice_Sample_Library/robot_grab/code/point.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/robot_grab/images/point.png b/Paddle_Industry_Practice_Sample_Library/robot_grab/images/point.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/robot_grab/images/rebot1.png b/Paddle_Industry_Practice_Sample_Library/robot_grab/images/rebot1.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/.gitignore b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/.gitignore
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/README.md b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/README.md b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/data.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/data.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/evaluate.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/evaluate.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/run_evaluate.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/run_evaluate.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/run_train.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/run_train.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/train.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/classification/train.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/demo.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/demo.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/deploy/predict.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/deploy/predict.py
old mode 100644
new mode 100755
index 2273249c9..1cb38fa34
--- a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/deploy/predict.py
+++ b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/deploy/predict.py
@@ -50,7 +50,8 @@ def create_predictor(self, model_path):
if not os.path.exists(model_file):
raise ValueError("not find model file path {}".format(model_file))
if not os.path.exists(params_file):
- raise ValueError("not find params file path {}".format(params_file))
+ raise ValueError("not find params file path {}".format(
+ params_file))
config = paddle.inference.Config(model_file, params_file)
if self.args.device == "gpu":
@@ -88,8 +89,8 @@ def create_predictor(self, model_path):
predictor.get_input_handle(name)
for name in predictor.get_input_names()
]
- output_handle = predictor.get_output_handle(predictor.get_output_names()
- [0])
+ output_handle = predictor.get_output_handle(predictor.get_output_names(
+ )[0])
return predictor, input_handles, output_handle
@@ -126,14 +127,15 @@ def predict_ext(self, args):
prediction) in enumerate(zip(seq_lens, predictions)):
idx = bid * args.batch_size + eid
tag_seq = [
- self.ext_id2label[idx] for idx in prediction[:seq_len][1:-1]
+ self.ext_id2label[idx]
+ for idx in prediction[:seq_len][1:-1]
]
text = ori_test_ds[idx]["text"]
aps = decoding(text[:args.ext_max_seq_len - 2], tag_seq)
for aid, ap in enumerate(aps):
aspect, opinions = ap[0], list(set(ap[1:]))
- aspect_text = self._concate_aspect_and_opinion(text, aspect,
- opinions)
+ aspect_text = self._concate_aspect_and_opinion(
+ text, aspect, opinions)
results.append({
"id": str(idx) + "_" + str(aid),
"aspect": aspect,
@@ -201,7 +203,9 @@ def post_process(self, args, ext_results, cls_results):
with open(args.save_path, "w", encoding="utf-8") as f:
for sentiment_result in sentiment_results:
- f.write(json.dumps(sentiment_result, ensure_ascii=False) + "\n")
+ f.write(
+ json.dumps(
+ sentiment_result, ensure_ascii=False) + "\n")
print(
f"sentiment analysis results has been saved to path: {args.save_path}"
)
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/deploy/run_predict.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/deploy/run_predict.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/export_model.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/export_model.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/README.md b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/data.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/data.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/evaluate.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/evaluate.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/run_evaluate.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/run_evaluate.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/run_train.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/run_train.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/train.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/extraction/train.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/imgs/design_cls_model.png b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/imgs/design_cls_model.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/imgs/design_ext_model.png b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/imgs/design_ext_model.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/imgs/sentiment_system.png b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/imgs/sentiment_system.png
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/README.md b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/README.md
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/data.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/data.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/evaluate.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/evaluate.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/performance_test.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/performance_test.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/quant_post.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/quant_post.py
old mode 100644
new mode 100755
index 70cea9e02..91e31137b
--- a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/quant_post.py
+++ b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/quant_post.py
@@ -48,8 +48,10 @@ def batch_generator_func():
batch_data[0].append(data[0])
batch_data[1].append(data[1])
if len(batch_data[0]) == args.batch_size:
- input_ids = Pad(axis=0, pad_val=0, dtype="int64")(batch_data[0])
- segment_ids = Pad(axis=0, pad_val=0, dtype="int64")(batch_data[1])
+ input_ids = Pad(axis=0, pad_val=0,
+ dtype="int64")(batch_data[0])
+ segment_ids = Pad(axis=0, pad_val=0,
+ dtype="int64")(batch_data[1])
yield [input_ids, segment_ids]
batch_data = [[], []]
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/run_evaluate.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/run_evaluate.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/run_performance_test.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/run_performance_test.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/run_quant.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/run_quant.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/run_train.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/run_train.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/train.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/pp_minilm/train.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/predict.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/predict.py
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/run_demo.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/run_demo.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/run_export_model.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/run_export_model.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/run_predict.sh b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/run_predict.sh
old mode 100644
new mode 100755
diff --git a/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/utils.py b/Paddle_Industry_Practice_Sample_Library/sentiment_analysis/utils.py
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/10HTC \351\243\236\346\241\250\350\256\272\346\226\207\345\244\215\347\216\260\346\214\221\346\210\230\350\265\233\357\274\210\347\254\254\345\233\233\346\234\237\357\274\211.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/10HTC \351\243\236\346\241\250\350\256\272\346\226\207\345\244\215\347\216\260\346\214\221\346\210\230\350\265\233\357\274\210\347\254\254\345\233\233\346\234\237\357\274\211.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/11\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232\345\256\236\347\216\260\347\256\200\345\215\225\347\232\204\347\253\257\345\210\260\347\253\257\350\207\252\345\212\250\351\251\276\351\251\266\346\250\241\345\236\213.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/11\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232\345\256\236\347\216\260\347\256\200\345\215\225\347\232\204\347\253\257\345\210\260\347\253\257\350\207\252\345\212\250\351\251\276\351\251\266\346\250\241\345\236\213.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/12\345\237\272\344\272\216\351\243\236\346\241\250\346\241\206\346\236\266\345\244\215\347\216\260PointNet.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/12\345\237\272\344\272\216\351\243\236\346\241\250\346\241\206\346\236\266\345\244\215\347\216\260PointNet.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/13\347\202\271\344\272\221\345\244\204\347\220\206\357\274\232\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260PointNet++.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/13\347\202\271\344\272\221\345\244\204\347\220\206\357\274\232\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260PointNet++.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/14\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\345\233\233\346\234\237\343\200\212Holistically-Nested Edge \343\200\213\345\206\240\345\206\233\346\226\271\346\241\210.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/14\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\345\233\233\346\234\237\343\200\212Holistically-Nested Edge \343\200\213\345\206\240\345\206\233\346\226\271\346\241\210.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/15BackgroundMattingV2\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/15BackgroundMattingV2\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/16\345\237\272\344\272\216 PaddlePaddle \346\241\206\346\236\266\347\232\204 SegNet \350\256\272\346\226\207\345\244\215\347\216\260.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/16\345\237\272\344\272\216 PaddlePaddle \346\241\206\346\236\266\347\232\204 SegNet \350\256\272\346\226\207\345\244\215\347\216\260.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/17yolact \345\244\215\347\216\260.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/17yolact \345\244\215\347\216\260.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/18\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232\345\237\272\344\272\216 Paddle2.0 \345\244\215\347\216\260 CheXNet \346\250\241\345\236\213.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/18\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232\345\237\272\344\272\216 Paddle2.0 \345\244\215\347\216\260 CheXNet \346\250\241\345\236\213.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/19 OCR.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/19 OCR.md
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/1\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\345\233\233\346\234\237YOLOv2\345\206\240\345\206\233\344\273\243\347\240\201.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/1\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\345\233\233\346\234\237YOLOv2\345\206\240\345\206\233\344\273\243\347\240\201.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/20\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232PointRend\357\274\232Image Segmentation as Rendering_CVPR2020.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/20\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232PointRend\357\274\232Image Segmentation as Rendering_CVPR2020.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/21\345\237\272\344\272\216Paddleseg\346\241\206\346\236\266\347\232\204CPNet\345\244\215\347\216\260.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/21\345\237\272\344\272\216Paddleseg\346\241\206\346\236\266\347\232\204CPNet\345\244\215\347\216\260.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/22\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232\345\237\272\344\272\216Paddle\345\244\215\347\216\260Rethinking BiSeNet.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/22\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232\345\237\272\344\272\216Paddle\345\244\215\347\216\260Rethinking BiSeNet.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/23\345\237\272\344\272\216Paddleseg\346\241\206\346\236\266\347\232\204ESPnetV2\345\244\215\347\216\260.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/23\345\237\272\344\272\216Paddleseg\346\241\206\346\236\266\347\232\204ESPnetV2\345\244\215\347\216\260.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/24\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232Progressive Growing of GANs\357\274\210PGAN\357\274\211.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/24\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232Progressive Growing of GANs\357\274\210PGAN\357\274\211.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/25ResNet_wide for CIFAR10.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/25ResNet_wide for CIFAR10.md
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/26\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232ProtoNet-\345\260\217\346\240\267\346\234\254\345\210\206\347\261\273\351\242\206\345\237\237\347\273\217\345\205\270\344\271\213\344\275\234.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/26\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232ProtoNet-\345\260\217\346\240\267\346\234\254\345\210\206\347\261\273\351\242\206\345\237\237\347\273\217\345\205\270\344\271\213\344\275\234.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/27\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260MLP-Mixer.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/27\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260MLP-Mixer.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/28Deep Networks with Stochastic Depth.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/28Deep Networks with Stochastic Depth.md
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/29R-Drop\357\274\232\346\221\230\344\270\213SOTA\347\232\204Dropout\346\255\243\345\210\231\345\214\226\347\255\226\347\225\245.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/29R-Drop\357\274\232\346\221\230\344\270\213SOTA\347\232\204Dropout\346\255\243\345\210\231\345\214\226\347\255\226\347\225\245.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/2\345\237\272\344\272\216PaddlePaddle\346\241\206\346\236\266\347\232\204YOLOv1\345\244\215\347\216\260.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/2\345\237\272\344\272\216PaddlePaddle\346\241\206\346\236\266\347\232\204YOLOv1\345\244\215\347\216\260.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/30PartialConv.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/30PartialConv.md
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/31\351\243\236\346\241\250\350\256\272\346\226\207\345\244\215\347\216\260\345\244\247\350\265\233\343\200\212Generative Adversarial Text-to-Image Synthesis\343\200\213.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/31\351\243\236\346\241\250\350\256\272\346\226\207\345\244\215\347\216\260\345\244\247\350\265\233\343\200\212Generative Adversarial Text-to-Image Synthesis\343\200\213.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/32pixel2style2pixel\347\254\254\345\233\233\346\234\237\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\344\270\200\345\220\215\344\273\243\347\240\201.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/32pixel2style2pixel\347\254\254\345\233\233\346\234\237\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\344\270\200\345\220\215\344\273\243\347\240\201.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/33SAM\347\254\254\345\233\233\346\234\237\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\344\270\200\345\220\215\344\273\243\347\240\201.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/33SAM\347\254\254\345\233\233\346\234\237\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\344\270\200\345\220\215\344\273\243\347\240\201.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/34TFT using paddle.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/34TFT using paddle.md
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/35\350\241\250\346\240\274\345\255\246\344\271\240\357\274\232\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260TabNet\347\275\221\347\273\234.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/35\350\241\250\346\240\274\345\255\246\344\271\240\357\274\232\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260TabNet\347\275\221\347\273\234.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/36\347\254\254\345\233\233\346\234\237\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\344\270\200\345\220\215\344\273\243\347\240\201\346\226\271\346\241\210-\344\272\272\344\275\223\345\247\277\346\200\201\344\274\260\350\256\241\357\274\210RMPose_PAFs\357\274\211.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/36\347\254\254\345\233\233\346\234\237\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\344\270\200\345\220\215\344\273\243\347\240\201\346\226\271\346\241\210-\344\272\272\344\275\223\345\247\277\346\200\201\344\274\260\350\256\241\357\274\210RMPose_PAFs\357\274\211.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/37\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\357\274\232\345\257\271\346\212\227\346\224\273\345\207\273.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/37\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\357\274\232\345\257\271\346\212\227\346\224\273\345\207\273.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/38\344\272\272\344\275\223\345\247\277\346\200\201\344\274\260\350\256\241\357\274\232\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260Hourglass.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/38\344\272\272\344\275\223\345\247\277\346\200\201\344\274\260\350\256\241\357\274\232\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260Hourglass.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/39SID-Paddle.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/39SID-Paddle.md
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/3\347\233\256\346\240\207\346\243\200\346\265\213\357\274\232\345\237\272\344\272\216Paddle\346\241\206\346\236\266\347\232\204EfficientDet\350\256\272\346\226\207\345\244\215\347\216\260.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/3\347\233\256\346\240\207\346\243\200\346\265\213\357\274\232\345\237\272\344\272\216Paddle\346\241\206\346\236\266\347\232\204EfficientDet\350\256\272\346\226\207\345\244\215\347\216\260.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/40Paddle_DIP.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/40Paddle_DIP.md
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/41ViLBERT-REC-Paddle.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/41ViLBERT-REC-Paddle.md
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/42\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232Emotion and Gender Classification.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/42\350\256\272\346\226\207\345\244\215\347\216\260\357\274\232Emotion and Gender Classification.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/43Paddle-Pulse.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/43Paddle-Pulse.md
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/44\345\237\272\344\272\216Paddle\345\244\215\347\216\260\350\256\272\346\226\207End to End Memory Networks.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/44\345\237\272\344\272\216Paddle\345\244\215\347\216\260\350\256\272\346\226\207End to End Memory Networks.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/45[\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\2544\346\234\237][DeepFEFM\345\206\240\345\206\233\346\226\271\346\241\210]DeepFEFM-paddle.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/45[\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\2544\346\234\237][DeepFEFM\345\206\240\345\206\233\346\226\271\346\241\210]DeepFEFM-paddle.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/46\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\357\274\232\345\237\272\344\272\216PaddleRec 24\345\260\217\346\227\266\345\277\253\351\200\237\345\244\215\347\216\260\347\273\217\345\205\270 CTR \351\242\204\344\274\260\347\256\227\346\263\225.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/46\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\357\274\232\345\237\272\344\272\216PaddleRec 24\345\260\217\346\227\266\345\277\253\351\200\237\345\244\215\347\216\260\347\273\217\345\205\270 CTR \351\242\204\344\274\260\347\256\227\346\263\225.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/47\350\256\272\346\226\207\345\244\215\347\216\260\346\216\250\350\215\220\350\265\233\351\201\223\347\254\254 2 \345\274\271\344\271\213\345\237\272\344\272\216PaddleRec \345\277\253\351\200\237\345\244\215\347\216\260 DIFM \347\256\227\346\263\225.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/47\350\256\272\346\226\207\345\244\215\347\216\260\346\216\250\350\215\220\350\265\233\351\201\223\347\254\254 2 \345\274\271\344\271\213\345\237\272\344\272\216PaddleRec \345\277\253\351\200\237\345\244\215\347\216\260 DIFM \347\256\227\346\263\225.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/48\345\215\217\345\220\214\346\216\250\347\220\206\357\274\232\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260NCR\346\250\241\345\236\213.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/48\345\215\217\345\220\214\346\216\250\347\220\206\357\274\232\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260NCR\346\250\241\345\236\213.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/49\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\357\274\232\350\264\235\345\217\266\346\226\257\347\245\236\347\273\217\347\275\221\347\273\234.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/49\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\357\274\232\350\264\235\345\217\266\346\226\257\347\245\236\347\273\217\347\275\221\347\273\234.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/4DenseDepth-paddle.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/4DenseDepth-paddle.md
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/50\345\257\271\346\212\227\346\200\247\350\207\252\347\274\226\347\240\201\345\231\250\357\274\232\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260AAE.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/50\345\257\271\346\212\227\346\200\247\350\207\252\347\274\226\347\240\201\345\231\250\357\274\232\345\237\272\344\272\216\351\243\236\346\241\250\345\244\215\347\216\260AAE.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/51\345\237\272\344\272\216PPSeg\346\241\206\346\236\266\347\232\204HRNet_W48_Contrast\345\244\215\347\216\260.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/51\345\237\272\344\272\216PPSeg\346\241\206\346\236\266\347\232\204HRNet_W48_Contrast\345\244\215\347\216\260.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/52CMML.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/52CMML.md
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/Paper_Recurrence_Championship_Project/5ScaledYOLOv4-Paddle.md b/Paper_Recurrence/Paper_Recurrence_Championship_Project/5ScaledYOLOv4-Paddle.md
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/6\345\237\272\344\272\216 PPDet \346\241\206\346\236\266\347\232\204 RetinaNet \345\244\215\347\216\260.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/6\345\237\272\344\272\216 PPDet \346\241\206\346\236\266\347\232\204 RetinaNet \345\244\215\347\216\260.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/7\343\200\220\351\243\236\346\241\250\346\241\206\346\236\266\350\256\272\346\226\207\345\244\215\347\216\260\343\200\221Show, Attend and Tell.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/7\343\200\220\351\243\236\346\241\250\346\241\206\346\236\266\350\256\272\346\226\207\345\244\215\347\216\260\343\200\221Show, Attend and Tell.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/8\343\200\220\351\243\236\346\241\250\346\241\206\346\236\266\350\256\272\346\226\207\345\244\215\347\216\260\343\200\221Bottom-Up and Top-Down.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/8\343\200\220\351\243\236\346\241\250\346\241\206\346\236\266\350\256\272\346\226\207\345\244\215\347\216\260\343\200\221Bottom-Up and Top-Down.md"
old mode 100644
new mode 100755
diff --git "a/Paper_Recurrence/Paper_Recurrence_Championship_Project/9\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\345\233\233\346\234\237YOLOv4\345\206\240\345\206\233\344\273\243\347\240\201.md" "b/Paper_Recurrence/Paper_Recurrence_Championship_Project/9\350\256\272\346\226\207\345\244\215\347\216\260\350\265\233\347\254\254\345\233\233\346\234\237YOLOv4\345\206\240\345\206\233\344\273\243\347\240\201.md"
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/README.md b/Paper_Recurrence/README.md
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/guide_of_paper_recurrence_cn.md b/Paper_Recurrence/guide_of_paper_recurrence_cn.md
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/imgs/output_27_20.png b/Paper_Recurrence/imgs/output_27_20.png
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/imgs/output_27_21.png b/Paper_Recurrence/imgs/output_27_21.png
old mode 100644
new mode 100755
diff --git a/Paper_Recurrence/imgs/output_9_0.png b/Paper_Recurrence/imgs/output_9_0.png
old mode 100644
new mode 100755
diff --git a/README.md b/README.md
old mode 100644
new mode 100755
index ecb15fb92..bd3da5b0b
--- a/README.md
+++ b/README.md
@@ -293,7 +293,7 @@
| NLP | [Netizen Sentiment Recognition During the Epidemic Based on PaddleHub](https://aistudio.baidu.com/aistudio/projectdetail/294224?channelType=0&channel=0) | [Developer CChan](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/82456) | This project is a solution for the competition on recognizing netizen sentiment during the epidemic. It uses PaddleHub and ERNIE to recognize the sentiment of Weibo posts from the epidemic period. |
| NLP | [[Paddle for Competitions] Product Review Opinion Extraction baseline](https://aistudio.baidu.com/aistudio/projectdetail/2417709) | [Developer w5688414](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/169515) | BERT-based baseline for the DataFountain product review opinion extraction competition, with added optimization methods |
| NLP | [[Paddle for Competitions] Script Character Emotion Recognition baseline, accuracy 0.676](https://aistudio.baidu.com/aistudio/projectdetail/2423977) | [Developer w5688414](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/169515) | Script character emotion recognition baseline using a BERT model |
-| Speech | [[Paddle for Competitions] Speech Synthesis](https://aistudio.baidu.com/aistudio/projectdetail/2793102?contributionType=1) | [Developer XYZ_916](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/812202) | Baseline for the 2021 XWBank Intelligent Speech Competition. As of 2021.11.17, this solution ranked first on the overall leaderboard and second on the submission leaderboard |
+| Speech | [[Paddle for Competitions] Speech Synthesis](https://aistudio.baidu.com/aistudio/projectdetail/2793102?contributionType=1) | [Developer XYZ_916](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/812202) | Baseline for the 2021 XWBank Intelligent Speech Competition. As of 2021.11.17, this solution ranked first on the overall leaderboard and second on the submission leaderboard |
| CV | [Chinese Scene Text Recognition Challenge baseline](https://aistudio.baidu.com/aistudio/projectdetail/229728?channelType=0&channel=0) | [Xiaodu AIStudio](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/7) | Baseline project for the Chinese Scene Text Recognition Challenge, for contestants to draw on as a reference |
| CV | [[Paddle for Competitions] Handwriting OCR Competition baseline](https://aistudio.baidu.com/aistudio/projectdetail/2606211) | [Developer Pink peach](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/532066) | Baseline for the handwriting OCR competition of the 2021 World AI Innovation Competition |
| CV | [2020 CCF BDCI: Remote Sensing Land Parcel Segmentation baseline](https://aistudio.baidu.com/aistudio/projectdetail/1090790?channelType=0&channel=0) | [Developer lxastro](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/349179) | Baseline model library for 2020 CCF BDCI remote sensing land parcel segmentation, including the baseline training recipe and the competition's evaluation scripts. |
@@ -349,4 +349,3 @@
### Contributors
Below is the list of awesome-DeepLearning contributors: [yang zhou](https://youngzhou1999.github.io/cv/),[Niki_173](https://github.com/Niki173),[Twelveeee](https://github.com/Twelveeee),[buriedms](https://github.com/buriedms),[AqourAreA](https://github.com/AqourAreA),[zhangjin12138](https://github.com/zhangjin12138),[rerny](https://github.com/rerny),[LiuCongNLP](https://www.zhihu.com/people/LiuCongNLP),[LemonCherryFu](https://github.com/LemonCherryFu), [lutianhao](https://github.com/lutianhao)
-
diff --git "a/call_for_contribution/\351\253\230\346\240\241\345\237\271\350\256\255repo\345\217\212SIG\344\273\213\347\273\215.pdf" "b/call_for_contribution/\351\253\230\346\240\241\345\237\271\350\256\255repo\345\217\212SIG\344\273\213\347\273\215.pdf"
old mode 100644
new mode 100755
diff --git a/competition/README.md b/competition/README.md
old mode 100644
new mode 100755
index 2d830e766..4bcf510e4
--- a/competition/README.md
+++ b/competition/README.md
@@ -1,2 +1 @@
# Competitions
-
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/README.md" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/README.md"
old mode 100644
new mode 100755
index 375f1a3ec..e2d936f08
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/README.md"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/README.md"
@@ -53,8 +53,8 @@ The SE module selects feature channels through the squeeze and excitation operations, assigning different
``` python
from paddle import nn
-
-
+
+
class SEModule(nn.Layer):
def __init__(self, channels, reduction=16):
@@ -144,11 +144,11 @@ class CFFModel(nn.Layer):
##### 2.4.1.4 SR
-
+
The SR module follows the SR module proposed in the CVPR 2020 paper "Global Context-Aware Progressive Aggregation Network for Salient Object Detection"; it fills in the holes that appear in the prediction map when different levels are multiplied together directly in the CFF module. The Paddle code is as follows.
``` python
class SRModel(nn.Layer):
-
+
def __init__(self, in_channel):
super(SRModel, self).__init__()
self.conv1 = nn.Conv2D(in_channel, 256, kernel_size=3, stride=1, padding=1)
@@ -161,7 +161,7 @@ class CFFModel(nn.Layer):
w, b = out2[:, :256, :, :], out2[:, 256:, :, :]
return F.relu(w * out1 + b)
```
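To make the SR idea easy to try in isolation, here is a minimal self-contained sketch (reconstructed from the snippet above and the SR class in ACFFNet.py further down; not the competition code verbatim). It projects the input to 256 channels, doubles that to 512, splits the result into a weight map `w` and a bias map `b`, and returns `relu(w * out1 + b)`:

``` python
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

class SRSketch(nn.Layer):
    """Minimal SR block: predicts a per-pixel weight w and bias b."""
    def __init__(self, in_channel):
        super(SRSketch, self).__init__()
        self.conv1 = nn.Conv2D(in_channel, 256, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2D(256)
        self.conv2 = nn.Conv2D(256, 512, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        out1 = F.relu(self.bn1(self.conv1(x)))  # refined features, 256 channels
        out2 = self.conv2(out1)                 # 512 channels, to be split into w and b
        w, b = out2[:, :256, :, :], out2[:, 256:, :, :]
        return F.relu(w * out1 + b)             # per-pixel affine re-weighting fills holes

x = paddle.randn([1, 64, 24, 24])
print(SRSketch(64)(x).shape)  # [1, 256, 24, 24]
```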
-
+
#### 2.4.2 FMFNet
@@ -173,7 +173,7 @@ The SSM module uses dilated convolution; the dilated-convolution diagram is shown below
Dilated convolution enlarges a kernel's receptive field by inserting zeros between the kernel taps, so a kernel of the same size sees a wider area and captures more complete visual information. Different dilation rates give different receptive fields; the larger the rate, the wider the view. The ASPP module, built around dilated convolution, stacks kernels with several dilation rates to gather features at multiple scales, compresses them with a convolution, and outputs the result, thereby compensating for the information lost to pooling. Here we use only a single kernel with dilation rate 3 (a stand-alone illustration follows the snippet below). The Paddle code is as follows.
``` python
-class SSM(nn.Layer):
+class SSM(nn.Layer):
def __init__(self):
super(SSM, self).__init__()
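As promised above, a quick stand-alone illustration of the dilation arithmetic (a sketch for intuition, not part of the competition code): a 3x3 kernel with dilation rate d covers 3 + 2(d - 1) pixels per side, so rate 3 spans a 7x7 window, and setting the padding equal to the dilation rate keeps the spatial size unchanged.

``` python
import paddle
import paddle.nn as nn

x = paddle.randn([1, 64, 32, 32])
# Plain 3x3 convolution: 3x3 receptive field.
plain = nn.Conv2D(64, 64, kernel_size=3, padding=1)
# Dilated 3x3 convolution, rate 3: taps sit 3 pixels apart, spanning 7x7.
dilated = nn.Conv2D(64, 64, kernel_size=3, padding=3, dilation=3)
print(plain(x).shape, dilated(x).shape)  # both [1, 64, 32, 32]
```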
@@ -192,7 +192,7 @@ class SSM(nn.Layer):
##### 2.4.2.2 FIM
The FIM module lets high- and low-level features each interact with their neighboring levels and then refines the features through residual connections, ultimately optimizing the multi-level features. High-level features are upsampled and low-level features are downsampled, and each is fused with the low-/high-level features respectively to realize the interaction. The Paddle code is as follows.
``` python
-class FIM(nn.Layer):
+class FIM(nn.Layer):
def __init__(self):
super(FIM, self).__init__()
@@ -205,7 +205,7 @@ class FIM(nn.Layer):
self.bn3 = nn.BatchNorm2D(64)
self.cv4 = nn.Conv2D(64, 64, 3, 1, 1)
self.bn4 = nn.BatchNorm2D(64)
-
+
def forward(self, l, h):
h_l = F.interpolate(h, size=l.shape[2:], mode='bilinear', align_corners=True)
l_h = F.interpolate(l, size=h.shape[2:], mode='bilinear', align_corners=True)
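To make the cross-level interaction concrete, here is a compact reconstruction (ours, simplified from the FIM snippets above): each level is resampled to the other's resolution, refined by a conv-BN-ReLU, and added back through a residual connection.

``` python
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

class FIMSketch(nn.Layer):
    """Simplified feature-interaction block: low/high levels exchange information."""
    def __init__(self, ch=64):
        super(FIMSketch, self).__init__()
        self.refine_h = nn.Sequential(nn.Conv2D(ch, ch, 3, 1, 1), nn.BatchNorm2D(ch), nn.ReLU())
        self.refine_l = nn.Sequential(nn.Conv2D(ch, ch, 3, 1, 1), nn.BatchNorm2D(ch), nn.ReLU())

    def forward(self, l, h):
        # Resample each level to the other's spatial size.
        h_l = F.interpolate(h, size=l.shape[2:], mode='bilinear', align_corners=True)
        l_h = F.interpolate(l, size=h.shape[2:], mode='bilinear', align_corners=True)
        # Refine the resampled features, then fuse with residual connections.
        l = F.relu(l + self.refine_h(h_l))
        h = F.relu(h + self.refine_l(l_h))
        return l, h

l = paddle.randn([1, 64, 48, 48])  # low-level: higher resolution
h = paddle.randn([1, 64, 12, 12])  # high-level: lower resolution
l, h = FIMSketch()(l, h)
print(l.shape, h.shape)  # [1, 64, 48, 48] [1, 64, 12, 12]
```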
@@ -279,6 +279,3 @@ class PFM(nn.Layer):
#### 2.4.3 Summary
Using three backbone networks provided by PaddleClas (Res2Net200, ResNeXt101, SwinT384) and two decoder networks (ACFFNet, FMFNet), we built four models: Res2Net200+ACFFNet, ResNeXt101+ACFFNet, SwinT384+ACFFNet, and Res2Net200+FMFNet. A weighted fusion of the four models produced our highest score in the competition.
-
-
-
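As a sketch of what the weighted fusion can look like at inference time (the weights below are illustrative placeholders, not the competition values):

``` python
import paddle
import paddle.nn.functional as F

def fuse_predictions(preds, weights):
    """Weighted average of per-model saliency logits."""
    total = sum(weights)
    return sum(w * p for w, p in zip(weights, preds)) / total

# Four hypothetical model outputs (logits) for one image, each [N, 1, H, W].
preds = [paddle.randn([1, 1, 384, 384]) for _ in range(4)]
fused = fuse_predictions(preds, weights=[0.3, 0.25, 0.25, 0.2])  # placeholder weights
mask = (F.sigmoid(fused) > 0.5).astype('float32')  # binarize the fused saliency map
print(mask.shape)  # [1, 1, 384, 384]
```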
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/ACFFNet.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/ACFFNet.py"
old mode 100644
new mode 100755
index a6becd1b7..6f2be5f86
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/ACFFNet.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/ACFFNet.py"
@@ -14,9 +14,11 @@ def __init__(self, in_channel_left, in_channel_right):
self.conv2 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2D(256)
- self.conv13 = nn.Conv2D(256, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
+ self.conv13 = nn.Conv2D(
+ 256, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.bn13 = nn.BatchNorm2D(256)
- self.conv31 = nn.Conv2D(256, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
+ self.conv31 = nn.Conv2D(
+ 256, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.bn31 = nn.BatchNorm2D(256)
def forward(self, left, down):
@@ -51,12 +53,15 @@ def forward(self, x):
class CFF(nn.Layer):
def __init__(self, in_channel_left, in_channel_down, in_channel_right):
super(CFF, self).__init__()
- self.conv0 = nn.Conv2D(in_channel_left, 256, kernel_size=3, stride=1, padding=1)
+ self.conv0 = nn.Conv2D(
+ in_channel_left, 256, kernel_size=3, stride=1, padding=1)
self.bn0 = nn.BatchNorm2D(256)
- self.conv1 = nn.Conv2D(in_channel_down, 256, kernel_size=3, stride=1, padding=1)
+ self.conv1 = nn.Conv2D(
+ in_channel_down, 256, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2D(256)
- self.conv2 = nn.Conv2D(in_channel_right, 256, kernel_size=3, stride=1, padding=1)
+ self.conv2 = nn.Conv2D(
+ in_channel_right, 256, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2D(256)
self.conv3 = nn.Conv2D(256 * 3, 256, kernel_size=3, stride=1, padding=1)
@@ -81,7 +86,8 @@ def forward(self, left, down, right):
class SR(nn.Layer):
def __init__(self, in_channel):
super(SR, self).__init__()
- self.conv1 = nn.Conv2D(in_channel, 256, kernel_size=3, stride=1, padding=1)
+ self.conv1 = nn.Conv2D(
+ in_channel, 256, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2D(256)
self.conv2 = nn.Conv2D(256, 512, kernel_size=3, stride=1, padding=1)
@@ -119,5 +125,9 @@ def forward(self, x):
x3 = self.srm3(self.cff3(x3, x4, x5))
x2 = self.srm2(self.cff2(x2, x3, x5))
- x2 = F.interpolate(self.linear2(x2), mode='bilinear', size=x.shape[2:], align_corners=True)
+ x2 = F.interpolate(
+ self.linear2(x2),
+ mode='bilinear',
+ size=x.shape[2:],
+ align_corners=True)
return [x2]
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/ACFFViT.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/ACFFViT.py"
old mode 100644
new mode 100755
index 53c430cbe..ae5d9ca51
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/ACFFViT.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/ACFFViT.py"
@@ -14,9 +14,11 @@ def __init__(self, in_channel_left, in_channel_right):
self.conv2 = nn.Conv2D(192, 192, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2D(192)
- self.conv13 = nn.Conv2D(192, 192, kernel_size=(1, 3), stride=1, padding=(0, 1))
+ self.conv13 = nn.Conv2D(
+ 192, 192, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.bn13 = nn.BatchNorm2D(192)
- self.conv31 = nn.Conv2D(192, 192, kernel_size=(3, 1), stride=1, padding=(1, 0))
+ self.conv31 = nn.Conv2D(
+ 192, 192, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.bn31 = nn.BatchNorm2D(192)
def forward(self, left, down):
@@ -51,15 +53,19 @@ def forward(self, x):
class CFF(nn.Layer):
def __init__(self, in_channel_left, in_channel_down, in_channel_right):
super(CFF, self).__init__()
- self.conv0 = nn.Conv2D(in_channel_left, 192, kernel_size=3, stride=1, padding=1)
+ self.conv0 = nn.Conv2D(
+ in_channel_left, 192, kernel_size=3, stride=1, padding=1)
self.bn0 = nn.BatchNorm2D(192)
- self.conv1 = nn.Conv2D(in_channel_down, 192, kernel_size=3, stride=1, padding=1)
+ self.conv1 = nn.Conv2D(
+ in_channel_down, 192, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2D(192)
- self.conv2 = nn.Conv2D(in_channel_right, 192, kernel_size=3, stride=1, padding=1)
+ self.conv2 = nn.Conv2D(
+ in_channel_right, 192, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2D(192)
- self.conv3 = nn.Conv2D(192 * 3, 192, kernel_size=3, stride=1, padding=1)
+ self.conv3 = nn.Conv2D(
+ 192 * 3, 192, kernel_size=3, stride=1, padding=1)
self.bn3 = nn.BatchNorm2D(192)
def forward(self, left, down, right):
@@ -81,9 +87,11 @@ def forward(self, left, down, right):
class SR(nn.Layer):
def __init__(self, in_channel):
super(SR, self).__init__()
- self.conv1 = nn.Conv2D(in_channel, 192, kernel_size=3, stride=1, padding=1)
+ self.conv1 = nn.Conv2D(
+ in_channel, 192, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2D(192)
- self.conv2 = nn.Conv2D(192, 192 * 2, kernel_size=3, stride=1, padding=1)
+ self.conv2 = nn.Conv2D(
+ 192, 192 * 2, kernel_size=3, stride=1, padding=1)
def forward(self, x):
out1 = F.relu(self.bn1(self.conv1(x)))
@@ -119,5 +127,9 @@ def forward(self, x):
x3 = self.srm3(self.fam3(x3, x4, x5))
x2 = self.srm2(self.fam2(x2, x3, x5))
- x2 = F.interpolate(self.linear2(x2), mode='bilinear', size=x.shape[2:], align_corners=True)
+ x2 = F.interpolate(
+ self.linear2(x2),
+ mode='bilinear',
+ size=x.shape[2:],
+ align_corners=True)
return [x2]
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/FMFNet.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/FMFNet.py"
old mode 100644
new mode 100755
index b1f0bf80d..6ec288285
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/FMFNet.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/FMFNet.py"
@@ -3,7 +3,7 @@
import paddle.nn.functional as F
-class SSM(nn.Layer): # Semantic Supplement Module (SSM)
+class SSM(nn.Layer):  # Semantic Supplement Module (SSM)
def __init__(self):
super(SSM, self).__init__()
self.cv1 = nn.Conv2D(64, 64, 3, 1, 1)
@@ -14,11 +14,11 @@ def __init__(self):
def forward(self, x):
d1 = self.bn1(self.cv1(x))
d2 = self.bn2(self.cv2(x))
- out = F.relu(d1+d2+x)
+ out = F.relu(d1 + d2 + x)
return out
-class Feature_mutual_feedback_module(nn.Layer): # Feature Mutual Feedback module (FMF)
+class Feature_mutual_feedback_module(nn.Layer):  # Feature Mutual Feedback module (FMF)
def __init__(self):
super(Feature_mutual_feedback_module, self).__init__()
self.cv1 = nn.Conv2D(64, 64, 3, 1, 1)
@@ -32,8 +32,10 @@ def __init__(self):
self.bn4 = nn.BatchNorm2D(64)
def forward(self, l, h):
- h_l = F.interpolate(h, size=l.shape[2:], mode='bilinear', align_corners=True)
- l_h = F.interpolate(l, size=h.shape[2:], mode='bilinear', align_corners=True)
+ h_l = F.interpolate(
+ h, size=l.shape[2:], mode='bilinear', align_corners=True)
+ l_h = F.interpolate(
+ l, size=h.shape[2:], mode='bilinear', align_corners=True)
h_l = F.relu(self.bn1(self.cv1(h_l)))
l_h = F.relu(self.bn2(self.cv2(l_h)))
@@ -46,7 +48,8 @@ def forward(self, l, h):
return l, h
-class Progressive_fusion_module(nn.Layer): # Progressive Fusion Module (PFM)
+class Progressive_fusion_module(
+ nn.Layer):  # Progressive Fusion Module (PFM)
def __init__(self):
super(Progressive_fusion_module, self).__init__()
self.cv1 = nn.Conv2D(64, 64, 3, 1, 1)
@@ -74,22 +77,26 @@ def __init__(self):
self.bn8 = nn.BatchNorm2D(64)
def forward(self, out1, out2, out3, out4, out5):
- out5 = F.interpolate(out5, size=out4.shape[2:], mode='bilinear', align_corners=True)
+ out5 = F.interpolate(
+ out5, size=out4.shape[2:], mode='bilinear', align_corners=True)
out5 = F.relu(self.bn1(self.cv1(out5)))
out4 = paddle.concat([out4, out5], axis=1)
out4 = F.relu(self.bn2(self.cv2(out4)))
- out4 = F.interpolate(out4, size=out3.shape[2:], mode='bilinear', align_corners=True)
+ out4 = F.interpolate(
+ out4, size=out3.shape[2:], mode='bilinear', align_corners=True)
out4 = F.relu(self.bn3(self.cv3(out4)))
out3 = paddle.concat([out3, out4], axis=1)
out3 = F.relu(self.bn4(self.cv4(out3)))
- out3 = F.interpolate(out3, size=out2.shape[2:], mode='bilinear', align_corners=True)
+ out3 = F.interpolate(
+ out3, size=out2.shape[2:], mode='bilinear', align_corners=True)
out3 = F.relu(self.bn5(self.cv5(out3)))
out2 = paddle.concat([out2, out3], axis=1)
out2 = F.relu(self.bn6(self.cv6(out2)))
- out2 = F.interpolate(out1, size=out2.shape[2:], mode='bilinear', align_corners=True)
+ out2 = F.interpolate(
+ out1, size=out2.shape[2:], mode='bilinear', align_corners=True)
out2 = F.relu(self.bn7(self.cv7(out2)))
out1 = paddle.concat([out1, out2], axis=1)
@@ -117,9 +124,11 @@ class SEModule(nn.Layer):
def __init__(self, channels, reduction=16):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2D(1)
- self.fc1 = nn.Conv2D(channels, channels // reduction, kernel_size=1, padding=0)
+ self.fc1 = nn.Conv2D(
+ channels, channels // reduction, kernel_size=1, padding=0)
self.relu = nn.ReLU()
- self.fc2 = nn.Conv2D(channels // reduction, channels, kernel_size=1, padding=0)
+ self.fc2 = nn.Conv2D(
+ channels // reduction, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, inputs):
@@ -135,12 +144,18 @@ class FMFModel(nn.Layer):
def __init__(self, backbone):
super(FMFModel, self).__init__()
self.backbone = backbone
- self.se1, self.se2,self.se3,self.se4,self.se5 = SEModule(64), SEModule(64), SEModule(64), SEModule(64), SEModule(64)
- self.squeeze5 = nn.Sequential(nn.Conv2D(2048, 64, 3, 1, 1), nn.BatchNorm2D(64), nn.ReLU())
- self.squeeze4 = nn.Sequential(nn.Conv2D(1024, 64, 3, 1, 1), nn.BatchNorm2D(64), nn.ReLU())
- self.squeeze3 = nn.Sequential(nn.Conv2D(512, 64, 3, 1, 1), nn.BatchNorm2D(64), nn.ReLU())
- self.squeeze2 = nn.Sequential(nn.Conv2D(256, 64, 3, 1, 1), nn.BatchNorm2D(64), nn.ReLU())
- self.squeeze1 = nn.Sequential(nn.Conv2D(64, 64, 3, 1, 1), nn.BatchNorm2D(64), nn.ReLU())
+ self.se1, self.se2, self.se3, self.se4, self.se5 = SEModule(
+ 64), SEModule(64), SEModule(64), SEModule(64), SEModule(64)
+ self.squeeze5 = nn.Sequential(
+ nn.Conv2D(2048, 64, 3, 1, 1), nn.BatchNorm2D(64), nn.ReLU())
+ self.squeeze4 = nn.Sequential(
+ nn.Conv2D(1024, 64, 3, 1, 1), nn.BatchNorm2D(64), nn.ReLU())
+ self.squeeze3 = nn.Sequential(
+ nn.Conv2D(512, 64, 3, 1, 1), nn.BatchNorm2D(64), nn.ReLU())
+ self.squeeze2 = nn.Sequential(
+ nn.Conv2D(256, 64, 3, 1, 1), nn.BatchNorm2D(64), nn.ReLU())
+ self.squeeze1 = nn.Sequential(
+ nn.Conv2D(64, 64, 3, 1, 1), nn.BatchNorm2D(64), nn.ReLU())
self.fa1, self.fa2, self.fa3, self.fa4, self.fa5 = SSM(), \
SSM(), \
SSM(), \
@@ -168,5 +183,9 @@ def forward(self, x):
out1, out2, out3, out4, out5 = self.FMF2(out1, out2, out3, out4, out5)
out1, out2, out3, out4, out5 = self.FMF3(out1, out2, out3, out4, out5)
out = self.mso(out1, out2, out3, out4, out5)
- out = F.interpolate(self.linear(out), size=x.shape[2:], mode='bilinear', align_corners=True)
+ out = F.interpolate(
+ self.linear(out),
+ size=x.shape[2:],
+ mode='bilinear',
+ align_corners=True)
return [out]
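Because the SE block recurs throughout the decoders above, here is a compact stand-alone sketch of its squeeze-and-excitation computation (same structure as the SEModule in this file, reduction=16):

``` python
import paddle
import paddle.nn as nn

class SESketch(nn.Layer):
    def __init__(self, channels, reduction=16):
        super(SESketch, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2D(1)  # squeeze: global spatial context
        self.fc1 = nn.Conv2D(channels, channels // reduction, kernel_size=1, padding=0)
        self.relu = nn.ReLU()
        self.fc2 = nn.Conv2D(channels // reduction, channels, kernel_size=1, padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs):
        x = self.avg_pool(inputs)                            # [N, C, 1, 1]
        x = self.sigmoid(self.fc2(self.relu(self.fc1(x))))   # excitation: channel gates
        return inputs * x                                    # re-weight feature channels

x = paddle.randn([2, 64, 24, 24])
print(SESketch(64)(x).shape)  # [2, 64, 24, 24]
```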
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/__pycache__/ACFFNet.cpython-39.pyc" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/__pycache__/ACFFNet.cpython-39.pyc"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/__pycache__/ACFFViT.cpython-39.pyc" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/__pycache__/ACFFViT.cpython-39.pyc"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/__pycache__/FMFNet.cpython-39.pyc" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/Decoder/__pycache__/FMFNet.cpython-39.pyc"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/README.md" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/README.md"
old mode 100644
new mode 100755
index 7e758fb64..3a13b6478
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/README.md"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/README.md"
@@ -1,19 +1,19 @@
-# Usage
-
-1. https://aistudio.baidu.com/aistudio/competition/detail/78
-
-Download the dataset from the link above and unzip it. Rename train_50k_mask to mask and train_image to image.
-
-2. Run the .py file in utils; the path to fill in is the directory where mask is located.
-
-Since there are four models, the training and testing below need to be run for each of the four models.
-## Training
-Training is done by running train.py. See its internal config function for the parameter settings; every parameter
-has a corresponding explanation. The data path is the train_dataset parameter; fill in the directory where image is located.
-
-## Testing
-Testing is done in test.py. Enter the inputs as prompted by the parameters.
-
-## AI Studio project
-If debugging and running the code yourself seems troublesome, you can fork my project on AI Studio and run it with one click. Project link:
-https://aistudio.baidu.com/aistudio/projectdetail/2210815
+# Usage
+
+1. https://aistudio.baidu.com/aistudio/competition/detail/78
+
+Download the dataset from the link above and unzip it. Rename train_50k_mask to mask and train_image to image.
+
+2. Run the .py file in utils; the path to fill in is the directory where mask is located.
+
+Since there are four models, the training and testing below need to be run for each of the four models.
+## Training
+Training is done by running train.py. See its internal config function for the parameter settings; every parameter
+has a corresponding explanation. The data path is the train_dataset parameter; fill in the directory where image is located.
+
+## Testing
+Testing is done in test.py. Enter the inputs as prompted by the parameters.
+
+## AI Studio project
+If debugging and running the code yourself seems troublesome, you can fork my project on AI Studio and run it with one click. Project link:
+https://aistudio.baidu.com/aistudio/projectdetail/2210815
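A convenience sketch of step 1's renaming (the data_root path is a placeholder; point it at wherever you unzipped the dataset):

``` python
import os

data_root = '/path/to/unzipped/dataset'  # placeholder path
os.rename(os.path.join(data_root, 'train_50k_mask'), os.path.join(data_root, 'mask'))
os.rename(os.path.join(data_root, 'train_image'), os.path.join(data_root, 'image'))
```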
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/Res2Net.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/Res2Net.py"
old mode 100644
new mode 100755
index ff301b728..9d235e5fd
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/Res2Net.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/Res2Net.py"
@@ -1,4 +1,3 @@
-
import paddle
from paddleseg.utils import utils
from paddle import ParamAttr
@@ -203,7 +202,10 @@ def __init__(self, layers=200, scales=4, width=26, pretrained=False):
self.block_list.append(bottleneck_block)
shortcut = True
if pretrained:
- utils.load_entire_model(self, 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net200_vd_26w_4s_pretrained.pdparams')
+ utils.load_entire_model(
+ self,
+ 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net200_vd_26w_4s_pretrained.pdparams'
+ )
def forward(self, inputs):
y = self.conv1_1(inputs)
@@ -215,14 +217,12 @@ def forward(self, inputs):
for block in self.block_list:
y = block(y)
n += 1
- if n==3:
+ if n == 3:
y2 = y
- elif n==3+12:
+ elif n == 3 + 12:
y3 = y
- elif n==3+12+48:
+ elif n == 3 + 12 + 48:
y4 = y
y5 = y
return y2, y3, y4, y5
-
-
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/ResNeXt.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/ResNeXt.py"
old mode 100644
new mode 100755
index 9fa69865d..d79aa8af6
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/ResNeXt.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/ResNeXt.py"
@@ -5,7 +5,7 @@
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
-from paddle.nn import MaxPool2D
+from paddle.nn import MaxPool2D
class ConvBNLayer(nn.Layer):
@@ -368,7 +368,10 @@ def __init__(self, layers=101, cardinality=32, width=16, pretrained=False):
name="layer4.2")
if pretrained:
- utils.load_entire_model(self, 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x16_wsl_pretrained.pdparams')
+ utils.load_entire_model(
+ self,
+ 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x16_wsl_pretrained.pdparams'
+ )
def forward(self, inputs):
x = self._conv_stem(inputs)
@@ -411,4 +414,4 @@ def forward(self, inputs):
x = self._conv4_1(x)
x = self._conv4_2(x)
y4 = x
- return y1, y2, y3, y4
\ No newline at end of file
+ return y1, y2, y3, y4
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/SwinTransformer.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/SwinTransformer.py"
old mode 100644
new mode 100755
index 2bbe6bcd9..735a286a5
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/SwinTransformer.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/SwinTransformer.py"
@@ -6,7 +6,6 @@
import paddle.nn as nn
from paddle.nn.initializer import TruncatedNormal, Constant, Normal
-
trunc_normal_ = TruncatedNormal(std=.02)
normal_ = Normal
zeros_ = Constant(value=0.)
@@ -25,7 +24,7 @@ def drop_path(x, drop_prob=0., training=False):
if drop_prob == 0. or not training:
return x
keep_prob = paddle.to_tensor(1 - drop_prob)
- shape = (paddle.shape(x)[0],) + (1,) * (x.ndim - 1)
+ shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype)
random_tensor = paddle.floor(random_tensor) # binarize
output = x.divide(keep_prob) * random_tensor
@@ -63,7 +62,7 @@ def __init__(self,
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
- self.scale = qk_scale or head_dim ** -0.5
+ self.scale = qk_scale or head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
@@ -313,7 +312,7 @@ def __init__(self,
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
- self.scale = qk_scale or head_dim ** -0.5
+ self.scale = qk_scale or head_dim**-0.5
# define a parameter table of relative position bias
# 2*Wh-1 * 2*Ww-1, nH
@@ -338,7 +337,7 @@ def __init__(self,
relative_coords = relative_coords.transpose(
[1, 2, 0]) # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[
- 0] - 1 # shift to start from 0
+ 0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
@@ -362,7 +361,7 @@ def forward(self, x, mask=None):
B_, N, C = x.shape
qkv = self.qkv(x).reshape(
[B_, N, 3, self.num_heads, C // self.num_heads]).transpose(
- [2, 0, 3, 1, 4])
+ [2, 0, 3, 1, 4])
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
@@ -764,7 +763,7 @@ def forward(self, x):
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (
- self.patch_size[0] * self.patch_size[1])
+ self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
@@ -822,7 +821,7 @@ def __init__(self,
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
- self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
+ self.num_features = int(embed_dim * 2**(self.num_layers - 1))
self.mlp_ratio = mlp_ratio
self.pretrained = pretrained
@@ -854,9 +853,9 @@ def __init__(self,
self.layers = nn.LayerList()
for i_layer in range(self.num_layers):
layer = BasicLayer(
- dim=int(embed_dim * 2 ** i_layer),
- input_resolution=(patches_resolution[0] // (2 ** i_layer),
- patches_resolution[1] // (2 ** i_layer)),
+ dim=int(embed_dim * 2**i_layer),
+ input_resolution=(patches_resolution[0] // (2**i_layer),
+ patches_resolution[1] // (2**i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
@@ -880,7 +879,10 @@ def __init__(self,
self.apply(self._init_weights)
if pretrained:
- utils.load_entire_model(self, 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window12_384_22kto1k_pretrained.pdparams')
+ utils.load_entire_model(
+ self,
+ 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window12_384_22kto1k_pretrained.pdparams'
+ )
def _init_weights(self, m):
if isinstance(m, nn.Linear):
@@ -928,7 +930,6 @@ def flops(self):
for _, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[
- 0] * self.patches_resolution[1] // (2 ** self.num_layers)
+ 0] * self.patches_resolution[1] // (2**self.num_layers)
flops += self.num_features * self.num_classes
return flops
-
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/__pycache__/Res2Net.cpython-39.pyc" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/__pycache__/Res2Net.cpython-39.pyc"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/__pycache__/ResNeXt.cpython-39.pyc" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/__pycache__/ResNeXt.cpython-39.pyc"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/__pycache__/SwinTransformer.cpython-39.pyc" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/backcbone/__pycache__/SwinTransformer.cpython-39.pyc"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/dataset.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/dataset.py"
old mode 100644
new mode 100755
index fa98cdc05..fb6a0f803
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/dataset.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/dataset.py"
@@ -26,16 +26,16 @@ def __init__(self, cfg):
super(Data, self).__init__()
self.cfg = cfg
# Data augmentation and related transforms below
- self.randombrig = RandomBrightness()
- self.normalize = Normalize(mean=cfg.mean, std=cfg.std)
- self.randomcrop = RandomCrop()
- self.blur = RandomBlur()
+ self.randombrig = RandomBrightness()
+ self.normalize = Normalize(mean=cfg.mean, std=cfg.std)
+ self.randomcrop = RandomCrop()
+ self.blur = RandomBlur()
self.randomvflip = RandomVorizontalFlip()
self.randomhflip = RandomHorizontalFlip()
- self.resize = Resize(384, 384)
- self.totensor = ToTensor()
+ self.resize = Resize(384, 384)
+ self.totensor = ToTensor()
# Read the data
- with open(cfg.datapath+'/'+cfg.mode+'.txt', 'r') as lines:
+ with open(cfg.datapath + '/' + cfg.mode + '.txt', 'r') as lines:
self.samples = []
for line in lines:
self.samples.append(line.strip())
@@ -43,8 +43,10 @@ def __init__(self, cfg):
def __getitem__(self, idx):
name = self.samples[idx]
# Read the images
- image = cv2.imread(self.cfg.datapath+'/image/'+name+'.JPEG')[:,:,::-1].astype(np.float32)
- mask = cv2.imread(self.cfg.datapath + '/mask/' + name + '.png')[:, :, ::-1].astype(np.float32)
+ image = cv2.imread(self.cfg.datapath + '/image/' + name +
+ '.JPEG')[:, :, ::-1].astype(np.float32)
+ mask = cv2.imread(self.cfg.datapath + '/mask/' + name +
+ '.png')[:, :, ::-1].astype(np.float32)
H, W, C = image.shape
# Data augmentation at training time
if self.cfg.mode == 'train':
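A brief aside on the `[:, :, ::-1]` slice in `__getitem__` above: cv2.imread returns channels in BGR order, and reversing the last axis converts to RGB. A tiny sketch with a synthetic array:

``` python
import numpy as np

bgr = np.zeros((2, 2, 3), dtype=np.float32)
bgr[..., 0] = 1.0      # set the blue channel (OpenCV loads images as BGR)
rgb = bgr[:, :, ::-1]  # reverse the channel axis: BGR -> RGB
print(rgb[0, 0])       # [0. 0. 1.] - blue now sits in the last position
```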
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/models.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/models.py"
old mode 100644
new mode 100755
index 06f840678..0b3832fd1
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/models.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/models.py"
@@ -19,4 +19,5 @@ def ResNeXtandACFFNet():
def SwinTandACFFNet():
- return ACFFViTModel(backbone=SwinTransformer_large_patch4_window12_384(pretrained=False))
+ return ACFFViTModel(backbone=SwinTransformer_large_patch4_window12_384(
+ pretrained=False))
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/test.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/test.py"
old mode 100644
new mode 100755
index 840e94a90..2a70a9fb7
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/test.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/test.py"
@@ -40,10 +40,8 @@ def read_img(self, path):
# Normalize the original image
def norm_img(self, im):
- return cv2.normalize(im.astype('float'),
- None,
- 0.0, 1.0,
- cv2.NORM_MINMAX)
+ return cv2.normalize(
+ im.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
# Save the predicted images
def save(self, img_path, save_path=None):
@@ -57,18 +55,22 @@ def save(self, img_path, save_path=None):
for num in range(len(H)):
mae_pred = k_pred[num].unsqueeze(0)
path = img_path + '/mask/' + maskpath[num] + '.png'
- mae_mask = paddle.to_tensor(self.read_img(path)).unsqueeze(0).unsqueeze(0)
- mae_pred = F.interpolate(mae_pred, size=mae_mask.shape[2:], mode='bilinear')
+ mae_mask = paddle.to_tensor(self.read_img(path)).unsqueeze(
+ 0).unsqueeze(0)
+ mae_pred = F.interpolate(
+ mae_pred, size=mae_mask.shape[2:], mode='bilinear')
if save_path:
- save_paths = os.path.join(save_path, self.cfg.datapath.split('/')[-1])
+ save_paths = os.path.join(
+ save_path, self.cfg.datapath.split('/')[-1])
if not os.path.exists(save_paths):
os.makedirs(save_paths)
mae_pred = mae_pred[0].transpose((1, 2, 0)) * 255
- cv2.imwrite(save_paths + '/' + maskpath[num], mae_pred.cpu().numpy())
+ cv2.imwrite(save_paths + '/' + maskpath[num],
+ mae_pred.cpu().numpy())
-if __name__=='__main__':
+if __name__ == '__main__':
from models import Res2NetandACFFNet
from models import Res2NetandFMFNet
from models import ResNeXtandACFFNet
@@ -81,11 +83,3 @@ def save(self, img_path, save_path=None):
save_path = 'write the path where you want to save the test mask'
test = Test(dataset, img_path, model, model_list)
test.save(img_path, save_path)
-
-
-
-
-
-
-
-
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/train.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/train.py"
old mode 100644
new mode 100755
index 42c92be87..ff6c93421
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/train.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/train.py"
@@ -14,13 +14,25 @@ def config():
parser.add_argument('--Min_LR', default=0.0001, help='min lr')
parser.add_argument('--Max_LR', default=0.01, help='max lr')
parser.add_argument('--epoch', default=20, help='epoches')
- parser.add_argument('--mode_path', default=False, help='where your pretrained model')
- parser.add_argument('--train_bs', default=12, help='batch size for training')
+ parser.add_argument(
+ '--mode_path', default=False, help='where your pretrained model')
+ parser.add_argument(
+ '--train_bs', default=12, help='batch size for training')
parser.add_argument('--test_bs', default=12, help='batch size for testing')
- parser.add_argument('--show_step', default=20, help='if step%show_step == 0 : print the info')
- parser.add_argument('--train_dataset', default=r'E:\Saliency\Dataset\DUST\DUTS-TR', help='where your train dataset')
- parser.add_argument('--save_path', default='weight', help='where you want to save the pdparams files')
- parser.add_argument('--save_iter', default=1, help=r'every iter to save model')
+ parser.add_argument(
+ '--show_step',
+ default=20,
+ help='if step%show_step == 0 : print the info')
+ parser.add_argument(
+ '--train_dataset',
+ default=r'E:\Saliency\Dataset\DUST\DUTS-TR',
+ help='where your train dataset')
+ parser.add_argument(
+ '--save_path',
+ default='weight',
+ help='where you want to save the pdparams files')
+ parser.add_argument(
+ '--save_iter', default=1, help=r'every iter to save model')
cag = parser.parse_args()
return cag
@@ -45,9 +57,15 @@ def train(Dataset, Network, savepath):
if not os.path.exists(savepath):
os.makedirs(savepath)
cfg = Dataset.Config(
- snapshot=cag.mode_path, datapath=cag.train_dataset, savepath=savepath,
- mode='train', batch=cag.train_bs, lr=cag.Max_LR, momen=0.9, decay=5e-4, epoch=cag.epoch
- )
+ snapshot=cag.mode_path,
+ datapath=cag.train_dataset,
+ savepath=savepath,
+ mode='train',
+ batch=cag.train_bs,
+ lr=cag.Max_LR,
+ momen=0.9,
+ decay=5e-4,
+ epoch=cag.epoch)
data = Dataset.Data(cfg)
loader = DataLoader(
@@ -55,8 +73,7 @@ def train(Dataset, Network, savepath):
batch_size=cfg.batch,
shuffle=True,
num_workers=12,
- use_shared_memory=False,
- )
+ use_shared_memory=False, )
# Set up the network
net = Network
@@ -67,9 +84,13 @@ def train(Dataset, Network, savepath):
print('total params : ', total_params)
# Set up the optimizer and learning-rate decay
- scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=cag.Max_LR, T_max=len(loader)*cag.epoch)
- optimizer = paddle.optimizer.Momentum(parameters=net.parameters(), learning_rate=scheduler, momentum=0.9,
- weight_decay=cfg.decay)
+ scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
+ learning_rate=cag.Max_LR, T_max=len(loader) * cag.epoch)
+ optimizer = paddle.optimizer.Momentum(
+ parameters=net.parameters(),
+ learning_rate=scheduler,
+ momentum=0.9,
+ weight_decay=cfg.decay)
global_step = 0
# Start training
@@ -93,20 +114,25 @@ def train(Dataset, Network, savepath):
if batch_idx % cag.show_step == 0:
msg = '%s | step:%d/%d/%d (%.2f%%) | lr=%.6f | loss=%.6f | %s ' % (
- datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), batch_idx, epoch + 1, cfg.epoch,
- batch_idx / (50000 / cag.train_bs) * 100, optimizer.get_lr(), loss.item()
- , image.shape)
+ datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
+ batch_idx, epoch + 1, cfg.epoch,
+ batch_idx / (50000 / cag.train_bs) * 100,
+ optimizer.get_lr(), loss.item(), image.shape)
print(msg)
if epoch % cag.save_iter == 0:
- paddle.save(net.state_dict(), cfg.savepath + '/model-' + str(epoch + 1) + '.pdparams')
+ paddle.save(
+ net.state_dict(),
+ cfg.savepath + '/model-' + str(epoch + 1) + '.pdparams')
end = datetime.datetime.now()
spend = int((end - start).seconds)
mins = spend // 60
secon = spend % 60
loss_list = '%.5f' % np.mean(loss_list)
- print(f'this epoch spend {mins} m {secon} s and the average loss is {loss_list}', '\n')
+ print(
+ f'this epoch spend {mins} m {secon} s and the average loss is {loss_list}',
+ '\n')
if __name__ == '__main__':
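For reference, the cosine-annealing schedule and Momentum optimizer wiring above, in isolation (a minimal sketch; the one-layer model and step counts are stand-ins):

``` python
import paddle
import paddle.nn as nn

net = nn.Sequential(nn.Conv2D(3, 8, 3, padding=1), nn.ReLU())  # stand-in model
steps_per_epoch, epochs = 100, 20
scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
    learning_rate=0.01, T_max=steps_per_epoch * epochs)
optimizer = paddle.optimizer.Momentum(
    parameters=net.parameters(),
    learning_rate=scheduler,
    momentum=0.9,
    weight_decay=5e-4)

for step in range(3):  # skeleton of the training loop
    loss = net(paddle.randn([2, 3, 32, 32])).mean()
    loss.backward()
    optimizer.step()
    optimizer.clear_grad()
    scheduler.step()  # advance the cosine schedule once per step
print(optimizer.get_lr())
```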
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/transform.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/transform.py"
old mode 100644
new mode 100755
index b46740a13..891bd808f
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/transform.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/transform.py"
@@ -42,8 +42,10 @@ def __init__(self, H, W):
self.W = W
def __call__(self, image, mask):
- image = cv2.resize(image, dsize=(self.W, self.H), interpolation=cv2.INTER_LINEAR)
- mask = cv2.resize(mask, dsize=(self.W, self.H), interpolation=cv2.INTER_LINEAR)
+ image = cv2.resize(
+ image, dsize=(self.W, self.H), interpolation=cv2.INTER_LINEAR)
+ mask = cv2.resize(
+ mask, dsize=(self.W, self.H), interpolation=cv2.INTER_LINEAR)
return image, mask
@@ -59,13 +61,13 @@ def __call__(self, image, mask):
# Random crop
class RandomCrop(object):
def __call__(self, image, mask):
- H,W,_ = image.shape
- randw = np.random.randint(W/8)
- randh = np.random.randint(H/8)
+ H, W, _ = image.shape
+ randw = np.random.randint(W / 8)
+ randh = np.random.randint(H / 8)
offseth = 0 if randh == 0 else np.random.randint(randh)
offsetw = 0 if randw == 0 else np.random.randint(randw)
- p0, p1, p2, p3 = offseth, H+offseth-randh, offsetw, W+offsetw-randw
- return image[p0:p1,p2:p3, :], mask[p0:p1,p2:p3, :]
+ p0, p1, p2, p3 = offseth, H + offseth - randh, offsetw, W + offsetw - randw
+ return image[p0:p1, p2:p3, :], mask[p0:p1, p2:p3, :]
# Random Gaussian blur
@@ -99,6 +101,3 @@ def __call__(self, image, mask):
image, mask = image.astype(np.float32), mask.astype(np.float32)
mask = mask.mean(axis=0, keepdims=True)
return image, mask
-
-
-
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/utils.py" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/utils.py"
old mode 100644
new mode 100755
index 5c12c312a..a3c8a754f
--- "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/utils.py"
+++ "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/code/utils.py"
@@ -4,17 +4,15 @@
# Input: the path of the label set. Windows vs. Linux may have path issues (the difference between \\ and /); this is written for Windows, so if paths go wrong, switch to Linux-style /
def write_file_name(path):
- text = glob.glob(path + '\*\*')
+ text = glob.glob(path + '\*\*')
if '\\' in text[0]:
- text = [i.split('\\')[-1] for i in text]
+ text = [i.split('\\')[-1] for i in text]
else:
text = [i.split('/')[-1] for i in text]
- cwd = os.path.abspath(os.path.dirname(path))
+ cwd = os.path.abspath(os.path.dirname(path))
with open(cwd + '\\train.txt', 'a+') as f:
for i in text:
- f.write(i[0:-4]+'\n')
+ f.write(i[0:-4] + '\n')
write_file_name(r'I:\compa\pix\mask')
-
-
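An OS-agnostic variant of the helper above (a suggestion using os.path.join rather than hard-coded separators; behavior otherwise mirrors write_file_name):

``` python
import glob
import os

def write_file_name_portable(path):
    # Collect files two levels below `path`, independent of the OS separator.
    files = glob.glob(os.path.join(path, '*', '*'))
    names = [os.path.basename(f) for f in files]
    out = os.path.join(os.path.dirname(os.path.abspath(path)), 'train.txt')
    with open(out, 'a+') as f:
        for name in names:
            f.write(os.path.splitext(name)[0] + '\n')  # drop the file extension

# write_file_name_portable(r'I:\compa\pix\mask')  # same call site as above
```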
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/ACFFNet.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/ACFFNet.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/CFF.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/CFF.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/Dilation.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/Dilation.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/FMFNet.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/FMFNet.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/MFR.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/MFR.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/QQ\346\210\252\345\233\27620210806131723.png" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/QQ\346\210\252\345\233\27620210806131723.png"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/Res2Net.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/Res2Net.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/ResNeXt.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/ResNeXt.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/SE.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/SE.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/SR.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/SR.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/SwinT.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/SwinT.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/acff.png" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/acff.png"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/fcn.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/fcn.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/image_1.jpg" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/image_1.jpg"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/model.png" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/model.png"
old mode 100644
new mode 100755
diff --git "a/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/sod.png" "b/competition/\345\215\212\347\233\221\347\235\243\347\233\256\346\240\207\345\256\232\344\275\215\346\257\224\350\265\233\347\254\254\344\270\200\345\220\215\346\250\241\345\236\213/images/sod.png"
old mode 100644
new mode 100755
diff --git a/docs/Makefile b/docs/Makefile
old mode 100644
new mode 100755
diff --git a/docs/README.md b/docs/README.md
old mode 100644
new mode 100755
diff --git a/docs/conf.py b/docs/conf.py
old mode 100644
new mode 100755
index a365ed0dc..cccdd3b80
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -14,22 +14,18 @@
# import sys
# sys.path.insert(0, os.path.abspath('.'))
-
# -- Project information -----------------------------------------------------
project = 'PaddleEdu'
copyright = '2021, nlpers'
author = 'nlpers'
-
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = [
- "myst_parser"
-]
+extensions = ["myst_parser"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -48,9 +44,7 @@
import myst_parser
-source_parsers = {
- '.md': myst_parser
-}
+source_parsers = {'.md': myst_parser}
source_suffix = ['.rst', '.md']
# -- Options for HTML output -------------------------------------------------
@@ -75,4 +69,3 @@
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
html_logo = 'images/paddle.png'
-
diff --git a/docs/images/CNN/Application.png b/docs/images/CNN/Application.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/Biological_Vision.png b/docs/images/CNN/Biological_Vision.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/CNN.png b/docs/images/CNN/CNN.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/CV_Task.png b/docs/images/CNN/CV_Task.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/Deformable_Convolution/Example_comparison1.png b/docs/images/CNN/Deformable_Convolution/Example_comparison1.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/Deformable_Convolution/Example_comparison2.png b/docs/images/CNN/Deformable_Convolution/Example_comparison2.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/Deformable_Convolution/Illustration1.png b/docs/images/CNN/Deformable_Convolution/Illustration1.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/Deformable_Convolution/Illustration2.png b/docs/images/CNN/Deformable_Convolution/Illustration2.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/Deformable_Convolution/principle.png b/docs/images/CNN/Deformable_Convolution/principle.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/Early_CNN.jpg b/docs/images/CNN/Early_CNN.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/Traditional_CV_Method.png b/docs/images/CNN/Traditional_CV_Method.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/avgpooling_maxpooling.png b/docs/images/CNN/avgpooling_maxpooling.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/1_Convolution.png b/docs/images/CNN/convolution_operator/1_Convolution.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/2D_Convolution.png b/docs/images/CNN/convolution_operator/2D_Convolution.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/3D-UNet.jpg b/docs/images/CNN/convolution_operator/3D-UNet.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/3DCNN.png b/docs/images/CNN/convolution_operator/3DCNN.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/3D_Convolution.png b/docs/images/CNN/convolution_operator/3D_Convolution.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/BottleNeck.png b/docs/images/CNN/convolution_operator/BottleNeck.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Depthwise_Convolution.png b/docs/images/CNN/convolution_operator/Depthwise_Convolution.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Dilated_Convolution_Receptive_Field.png b/docs/images/CNN/convolution_operator/Dilated_Convolution_Receptive_Field.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Dilated_Convolution_r1.png b/docs/images/CNN/convolution_operator/Dilated_Convolution_r1.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Dilated_Convolution_r2.png b/docs/images/CNN/convolution_operator/Dilated_Convolution_r2.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Dilated_Convolution_r4.png b/docs/images/CNN/convolution_operator/Dilated_Convolution_r4.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Fully_Connected.png b/docs/images/CNN/convolution_operator/Fully_Connected.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Group_Convolution.png b/docs/images/CNN/convolution_operator/Group_Convolution.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Group_Convolution_Example.png b/docs/images/CNN/convolution_operator/Group_Convolution_Example.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Inception_module.jpg b/docs/images/CNN/convolution_operator/Inception_module.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Inverse_Convolution_Example.png b/docs/images/CNN/convolution_operator/Inverse_Convolution_Example.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Inverse_Convolution_Matrix.png b/docs/images/CNN/convolution_operator/Inverse_Convolution_Matrix.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Local_Connection.png b/docs/images/CNN/convolution_operator/Local_Connection.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/MobileNetv1_Separable_Convolution.png b/docs/images/CNN/convolution_operator/MobileNetv1_Separable_Convolution.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Pointwise_Convolution_1.png b/docs/images/CNN/convolution_operator/Pointwise_Convolution_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Pointwise_Convolution_256.png b/docs/images/CNN/convolution_operator/Pointwise_Convolution_256.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Receptive_Field_3x3.png b/docs/images/CNN/convolution_operator/Receptive_Field_3x3.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Receptive_Field_5x5.png b/docs/images/CNN/convolution_operator/Receptive_Field_5x5.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Spatial_Separable_Convolutions.png b/docs/images/CNN/convolution_operator/Spatial_Separable_Convolutions.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Standard_Convolution.png b/docs/images/CNN/convolution_operator/Standard_Convolution.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Standard_Convolution_Example.png b/docs/images/CNN/convolution_operator/Standard_Convolution_Example.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Standard_Convolution_Matrix.png b/docs/images/CNN/convolution_operator/Standard_Convolution_Matrix.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Standard_Convolution_out_1.png b/docs/images/CNN/convolution_operator/Standard_Convolution_out_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Standard_Convolution_out_256.png b/docs/images/CNN/convolution_operator/Standard_Convolution_out_256.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Transpose_Convolution_s1.gif b/docs/images/CNN/convolution_operator/Transpose_Convolution_s1.gif
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Transpose_Convolution_s2.gif b/docs/images/CNN/convolution_operator/Transpose_Convolution_s2.gif
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Upsample.png b/docs/images/CNN/convolution_operator/Upsample.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Visualize_CNN.png b/docs/images/CNN/convolution_operator/Visualize_CNN.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/Weight_Shared.png b/docs/images/CNN/convolution_operator/Weight_Shared.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/concept_zhihai.png b/docs/images/CNN/convolution_operator/concept_zhihai.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/convolution.png b/docs/images/CNN/convolution_operator/convolution.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/examp1_conv.png b/docs/images/CNN/convolution_operator/examp1_conv.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/examp1_ori.png b/docs/images/CNN/convolution_operator/examp1_ori.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/examp1_result.png b/docs/images/CNN/convolution_operator/examp1_result.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/examp2_conv.png b/docs/images/CNN/convolution_operator/examp2_conv.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/examp2_ori.png b/docs/images/CNN/convolution_operator/examp2_ori.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/examp2_result.png b/docs/images/CNN/convolution_operator/examp2_result.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/examp3_conv.png b/docs/images/CNN/convolution_operator/examp3_conv.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/examp3_ori.png b/docs/images/CNN/convolution_operator/examp3_ori.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/examp3_result.png b/docs/images/CNN/convolution_operator/examp3_result.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/example1.jpg b/docs/images/CNN/convolution_operator/example1.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/example2.jpg b/docs/images/CNN/convolution_operator/example2.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/fc.png b/docs/images/CNN/convolution_operator/fc.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/mini_batch.jpg b/docs/images/CNN/convolution_operator/mini_batch.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/multi_in_channel.png b/docs/images/CNN/convolution_operator/multi_in_channel.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/multi_out_channel.png b/docs/images/CNN/convolution_operator/multi_out_channel.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/padding.png b/docs/images/CNN/convolution_operator/padding.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/convolution_operator/stride.png b/docs/images/CNN/convolution_operator/stride.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/example.png b/docs/images/CNN/example.png
old mode 100644
new mode 100755
diff --git a/docs/images/CNN/k-max_pooling.png b/docs/images/CNN/k-max_pooling.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/1.png b/docs/images/computer_vision/OCR/1.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/CRNN_1.png b/docs/images/computer_vision/OCR/CRNN_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/CTPN_1.png b/docs/images/computer_vision/OCR/CTPN_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/CTPN_2.png b/docs/images/computer_vision/OCR/CTPN_2.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/DB_1.png b/docs/images/computer_vision/OCR/DB_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/DB_2.png b/docs/images/computer_vision/OCR/DB_2.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/DB_3.png b/docs/images/computer_vision/OCR/DB_3.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/EAST_1.png b/docs/images/computer_vision/OCR/EAST_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/EAST_2.png b/docs/images/computer_vision/OCR/EAST_2.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/ocr_diffi.png b/docs/images/computer_vision/OCR/ocr_diffi.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/ocr_dl.png b/docs/images/computer_vision/OCR/ocr_dl.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/ocr_email.png b/docs/images/computer_vision/OCR/ocr_email.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/OCR/ocr_trans.png b/docs/images/computer_vision/OCR/ocr_trans.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/AlexNet.png b/docs/images/computer_vision/classification/AlexNet.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/AlexNet_Error_Rate.png b/docs/images/computer_vision/classification/AlexNet_Error_Rate.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/DarkNet19.png b/docs/images/computer_vision/classification/DarkNet19.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/DarkNet53.png b/docs/images/computer_vision/classification/DarkNet53.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/DarkNet53_Acc.png b/docs/images/computer_vision/classification/DarkNet53_Acc.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/GoogLeNet.png b/docs/images/computer_vision/classification/GoogLeNet.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/GoogLeNet_Error_Rate.png b/docs/images/computer_vision/classification/GoogLeNet_Error_Rate.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/Inception_Module.jpg b/docs/images/computer_vision/classification/Inception_Module.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/LeNet.png b/docs/images/computer_vision/classification/LeNet.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/LeNet_Error_Rate.png b/docs/images/computer_vision/classification/LeNet_Error_Rate.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/MLP.png b/docs/images/computer_vision/classification/MLP.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/Multi-head_Attention.jpg b/docs/images/computer_vision/classification/Multi-head_Attention.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/PatchEmbed.jpg b/docs/images/computer_vision/classification/PatchEmbed.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/Positional_Encoding.png b/docs/images/computer_vision/classification/Positional_Encoding.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/VGG.png b/docs/images/computer_vision/classification/VGG.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/VGG_Error_Rate.png b/docs/images/computer_vision/classification/VGG_Error_Rate.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/VIT_pic2.png b/docs/images/computer_vision/classification/VIT_pic2.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/VIT_pic3.png b/docs/images/computer_vision/classification/VIT_pic3.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/VIT_pic6.png b/docs/images/computer_vision/classification/VIT_pic6.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/VIT_pic8.png b/docs/images/computer_vision/classification/VIT_pic8.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/ViT.png b/docs/images/computer_vision/classification/ViT.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/ViT_ACC.png b/docs/images/computer_vision/classification/ViT_ACC.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/ViT_Model.jpg b/docs/images/computer_vision/classification/ViT_Model.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/attention.png b/docs/images/computer_vision/classification/attention.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/error_in_CIFAR10.png b/docs/images/computer_vision/classification/error_in_CIFAR10.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/feature_map_visualization.png b/docs/images/computer_vision/classification/feature_map_visualization.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/res2net_block.png b/docs/images/computer_vision/classification/res2net_block.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/res2net_result.png b/docs/images/computer_vision/classification/res2net_result.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/res2net_result2.png b/docs/images/computer_vision/classification/res2net_result2.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/res2net_result3.png b/docs/images/computer_vision/classification/res2net_result3.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/res2net_result4.png b/docs/images/computer_vision/classification/res2net_result4.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/residual_block.png b/docs/images/computer_vision/classification/residual_block.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/resnext_cardinality.jpg b/docs/images/computer_vision/classification/resnext_cardinality.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/resnext_inceptionnet.jpg b/docs/images/computer_vision/classification/resnext_inceptionnet.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/resnext_result.jpg b/docs/images/computer_vision/classification/resnext_result.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/resnext_structure.jpg b/docs/images/computer_vision/classification/resnext_structure.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/resnext_wsl_results.jpg b/docs/images/computer_vision/classification/resnext_wsl_results.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/sigmoid_derivation.png b/docs/images/computer_vision/classification/sigmoid_derivation.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/single_neuronal_network.png b/docs/images/computer_vision/classification/single_neuronal_network.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/st_config.jpg b/docs/images/computer_vision/classification/st_config.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/st_net.jpg b/docs/images/computer_vision/classification/st_net.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/st_result.jpg b/docs/images/computer_vision/classification/st_result.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/st_result2.jpg b/docs/images/computer_vision/classification/st_result2.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/st_result3.jpg b/docs/images/computer_vision/classification/st_result3.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/st_swandvit.jpg b/docs/images/computer_vision/classification/st_swandvit.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/st_swb.jpg b/docs/images/computer_vision/classification/st_swb.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/st_swmsa.jpg b/docs/images/computer_vision/classification/st_swmsa.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/classification/st_window.jpg b/docs/images/computer_vision/classification/st_window.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/gridmask-0.png b/docs/images/computer_vision/image_augmentation/gridmask-0.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/hide-and-seek-visual.png b/docs/images/computer_vision/image_augmentation/hide-and-seek-visual.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/image_aug_samples.png b/docs/images/computer_vision/image_augmentation/image_aug_samples.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/image_aug_samples_s.jpg b/docs/images/computer_vision/image_augmentation/image_aug_samples_s.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/image_aug_samples_s_en.jpg b/docs/images/computer_vision/image_augmentation/image_aug_samples_s_en.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/main_image_aug.png b/docs/images/computer_vision/image_augmentation/main_image_aug.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/main_image_aug_s.jpg b/docs/images/computer_vision/image_augmentation/main_image_aug_s.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/test_autoaugment.jpeg b/docs/images/computer_vision/image_augmentation/test_autoaugment.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/test_baseline.jpeg b/docs/images/computer_vision/image_augmentation/test_baseline.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/test_cutmix.png b/docs/images/computer_vision/image_augmentation/test_cutmix.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/test_cutout.jpeg b/docs/images/computer_vision/image_augmentation/test_cutout.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/test_gridmask.jpeg b/docs/images/computer_vision/image_augmentation/test_gridmask.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/test_hideandseek.jpeg b/docs/images/computer_vision/image_augmentation/test_hideandseek.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/test_mixup.png b/docs/images/computer_vision/image_augmentation/test_mixup.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/test_randaugment.jpeg b/docs/images/computer_vision/image_augmentation/test_randaugment.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/image_augmentation/test_randomerassing.jpeg b/docs/images/computer_vision/image_augmentation/test_randomerassing.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Anchor.png b/docs/images/computer_vision/object_detection/Anchor.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Application.jpg b/docs/images/computer_vision/object_detection/Application.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Bounding_Box.png b/docs/images/computer_vision/object_detection/Bounding_Box.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Calculate_IOU.png b/docs/images/computer_vision/object_detection/Calculate_IOU.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Classification.png b/docs/images/computer_vision/object_detection/Classification.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Classification_Detection.png b/docs/images/computer_vision/object_detection/Classification_Detection.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Development_Path.png b/docs/images/computer_vision/object_detection/Development_Path.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Development_Path_2.png b/docs/images/computer_vision/object_detection/Development_Path_2.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Different_IOU.png b/docs/images/computer_vision/object_detection/Different_IOU.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/IOU.png b/docs/images/computer_vision/object_detection/IOU.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/NMS.png b/docs/images/computer_vision/object_detection/NMS.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Predicted_Box.png b/docs/images/computer_vision/object_detection/Predicted_Box.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/Region_Proposal.png b/docs/images/computer_vision/object_detection/Region_Proposal.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/object_detection/SoftNMS.png b/docs/images/computer_vision/object_detection/SoftNMS.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924225733868.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924225733868.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924230358992.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924230358992.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924231236147-16326803284991.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924231236147-16326803284991.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924231236147.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924231236147.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924231439096-16326804545332.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924231439096-16326804545332.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924231439096.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924231439096.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924231902861.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924231902861.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924232337417.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924232337417.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924233250610-16326804902533.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924233250610-16326804902533.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924233250610.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924233250610.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924233654968.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924233654968.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924233908829.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924233908829.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924234255382.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924234255382.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924234451247.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924234451247.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924235518873.png b/docs/images/computer_vision/semantic_segmentation/DeeplabV3/image-20210924235518873.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/0CA60C26FE09854E8A9A5A03E2849B77997C8406_size40_w800_h373.jpeg b/docs/images/computer_vision/semantic_segmentation/Overview/0CA60C26FE09854E8A9A5A03E2849B77997C8406_size40_w800_h373.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/2D-3D-S.png b/docs/images/computer_vision/semantic_segmentation/Overview/2D-3D-S.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/3Dbaseline.png b/docs/images/computer_vision/semantic_segmentation/Overview/3Dbaseline.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/68e53612019f446f988346cf9bf11f49.png b/docs/images/computer_vision/semantic_segmentation/Overview/68e53612019f446f988346cf9bf11f49.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/Adobepeople.png b/docs/images/computer_vision/semantic_segmentation/Overview/Adobepeople.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/CamVid.png b/docs/images/computer_vision/semantic_segmentation/Overview/CamVid.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/DAVIS.png b/docs/images/computer_vision/semantic_segmentation/Overview/DAVIS.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/DeepLabv3-network-structure-diagram-used-in-this-study.jpg b/docs/images/computer_vision/semantic_segmentation/Overview/DeepLabv3-network-structure-diagram-used-in-this-study.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/FCN.png b/docs/images/computer_vision/semantic_segmentation/Overview/FCN.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/KITTI.png b/docs/images/computer_vision/semantic_segmentation/Overview/KITTI.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/MINC.png b/docs/images/computer_vision/semantic_segmentation/Overview/MINC.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/MScoco4.png b/docs/images/computer_vision/semantic_segmentation/Overview/MScoco4.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/NYUDv2.png b/docs/images/computer_vision/semantic_segmentation/Overview/NYUDv2.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/OSD.png b/docs/images/computer_vision/semantic_segmentation/Overview/OSD.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/RGB-D.png b/docs/images/computer_vision/semantic_segmentation/Overview/RGB-D.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/SBD.png b/docs/images/computer_vision/semantic_segmentation/Overview/SBD.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/SUN3D.png b/docs/images/computer_vision/semantic_segmentation/Overview/SUN3D.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/SUNRGBD.png b/docs/images/computer_vision/semantic_segmentation/Overview/SUNRGBD.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/SYNTHIA.png b/docs/images/computer_vision/semantic_segmentation/Overview/SYNTHIA.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/Semantic_all.png b/docs/images/computer_vision/semantic_segmentation/Overview/Semantic_all.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/Sematic_divide.png b/docs/images/computer_vision/semantic_segmentation/Overview/Sematic_divide.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/ShapeNet.png b/docs/images/computer_vision/semantic_segmentation/Overview/ShapeNet.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/SiftFlow.png b/docs/images/computer_vision/semantic_segmentation/Overview/SiftFlow.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/YouTube.png b/docs/images/computer_vision/semantic_segmentation/Overview/YouTube.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/bigdatapointcloud.png b/docs/images/computer_vision/semantic_segmentation/Overview/bigdatapointcloud.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/citycaps.png b/docs/images/computer_vision/semantic_segmentation/Overview/citycaps.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/d901efaf1843473a8cb8c24fad35c2b0.png b/docs/images/computer_vision/semantic_segmentation/Overview/d901efaf1843473a8cb8c24fad35c2b0.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/d9a922820685470594d543bab7a0f9f5.png b/docs/images/computer_vision/semantic_segmentation/Overview/d9a922820685470594d543bab7a0f9f5.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/datasets_all.png b/docs/images/computer_vision/semantic_segmentation/Overview/datasets_all.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/deeplabv3plus.png b/docs/images/computer_vision/semantic_segmentation/Overview/deeplabv3plus.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930155834395.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930155834395.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930155846918.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930155846918.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930162745076.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930162745076.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930163709517.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930163709517.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930221250309.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930221250309.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930221602054.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930221602054.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930221623495.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930221623495.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930222103015.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930222103015.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930222205921.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930222205921.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930222459067.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930222459067.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930222623864.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930222623864.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930223347536.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930223347536.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930223858413.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930223858413.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930224029170.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930224029170.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930224329456.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930224329456.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930233950581.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20210930233950581.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20211018150130175.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20211018150130175.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20211018173256077.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20211018173256077.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20211205203253823.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20211205203253823.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/image-20211205203342758.png b/docs/images/computer_vision/semantic_segmentation/Overview/image-20211205203342758.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/pascal_part.png b/docs/images/computer_vision/semantic_segmentation/Overview/pascal_part.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/standford.png b/docs/images/computer_vision/semantic_segmentation/Overview/standford.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/sydney.png b/docs/images/computer_vision/semantic_segmentation/Overview/sydney.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/semantic_segmentation/Overview/v2-b448e1e8b5bbf7ace5f14c6c4d44c44e_r.jpg b/docs/images/computer_vision/semantic_segmentation/Overview/v2-b448e1e8b5bbf7ace5f14c6c4d44c44e_r.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TSM/TSM.png b/docs/images/computer_vision/video_understanding/TSM/TSM.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TSM/compare_with_sota.png b/docs/images/computer_vision/video_understanding/TSM/compare_with_sota.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TSM/compare_with_tsn.png b/docs/images/computer_vision/video_understanding/TSM/compare_with_tsn.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TSM/latency_data_movement.png b/docs/images/computer_vision/video_understanding/TSM/latency_data_movement.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TSM/residual_TSM.png b/docs/images/computer_vision/video_understanding/TSM/residual_TSM.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TSM/uni_directional_TSM.png b/docs/images/computer_vision/video_understanding/TSM/uni_directional_TSM.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TimeSformer/QKV.png b/docs/images/computer_vision/video_understanding/TimeSformer/QKV.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TimeSformer/comparison.png b/docs/images/computer_vision/video_understanding/TimeSformer/comparison.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TimeSformer/different_attention_block_accuracy.png b/docs/images/computer_vision/video_understanding/TimeSformer/different_attention_block_accuracy.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TimeSformer/divided_space_time_attention.png b/docs/images/computer_vision/video_understanding/TimeSformer/divided_space_time_attention.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TimeSformer/input_clip.png b/docs/images/computer_vision/video_understanding/TimeSformer/input_clip.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TimeSformer/linear_embedding.png b/docs/images/computer_vision/video_understanding/TimeSformer/linear_embedding.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TimeSformer/patches.png b/docs/images/computer_vision/video_understanding/TimeSformer/patches.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TimeSformer/self_attention_block.png b/docs/images/computer_vision/video_understanding/TimeSformer/self_attention_block.png
old mode 100644
new mode 100755
diff --git a/docs/images/computer_vision/video_understanding/TimeSformer/visualization.png b/docs/images/computer_vision/video_understanding/TimeSformer/visualization.png
old mode 100644
new mode 100755
diff --git a/docs/images/cover/0_cover.png b/docs/images/cover/0_cover.png
old mode 100644
new mode 100755
diff --git a/docs/images/cover/SIG.png b/docs/images/cover/SIG.png
old mode 100644
new mode 100755
diff --git a/docs/images/cover/repo.png b/docs/images/cover/repo.png
old mode 100644
new mode 100755
diff --git a/docs/images/cover/repo_cover1.png b/docs/images/cover/repo_cover1.png
old mode 100644
new mode 100755
diff --git a/docs/images/cover/transformer_cover.png b/docs/images/cover/transformer_cover.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/elu.jpg b/docs/images/deep_learning/activation_functions/elu.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/hard_swish.jpg b/docs/images/deep_learning/activation_functions/hard_swish.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/identity.jpg b/docs/images/deep_learning/activation_functions/identity.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/lrelu.jpg b/docs/images/deep_learning/activation_functions/lrelu.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/prelu.jpg b/docs/images/deep_learning/activation_functions/prelu.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/relu.jpg b/docs/images/deep_learning/activation_functions/relu.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/rrelu.jpg b/docs/images/deep_learning/activation_functions/rrelu.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/selu.jpg b/docs/images/deep_learning/activation_functions/selu.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/sigmoid.jpg b/docs/images/deep_learning/activation_functions/sigmoid.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/softmax.png b/docs/images/deep_learning/activation_functions/softmax.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/softplus.jpg b/docs/images/deep_learning/activation_functions/softplus.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/softsign.jpg b/docs/images/deep_learning/activation_functions/softsign.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/step.jpg b/docs/images/deep_learning/activation_functions/step.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/swish.jpg b/docs/images/deep_learning/activation_functions/swish.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/swish2.jpg b/docs/images/deep_learning/activation_functions/swish2.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/swish_derivatives.jpg b/docs/images/deep_learning/activation_functions/swish_derivatives.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/activation_functions/tanh.jpg b/docs/images/deep_learning/activation_functions/tanh.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/basic_concepts/multi_perceptron.png b/docs/images/deep_learning/basic_concepts/multi_perceptron.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/basic_concepts/neurons.png b/docs/images/deep_learning/basic_concepts/neurons.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/basic_concepts/single_perceptron.png b/docs/images/deep_learning/basic_concepts/single_perceptron.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/basic_concepts/xor.png b/docs/images/deep_learning/basic_concepts/xor.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/loss_functions/Align_Characters.png b/docs/images/deep_learning/loss_functions/Align_Characters.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/loss_functions/CRNN.png b/docs/images/deep_learning/loss_functions/CRNN.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/loss_functions/CTC.png b/docs/images/deep_learning/loss_functions/CTC.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/loss_functions/CrossEntropy.png b/docs/images/deep_learning/loss_functions/CrossEntropy.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/loss_functions/Lb.png b/docs/images/deep_learning/loss_functions/Lb.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/loss_functions/MSE.jpg b/docs/images/deep_learning/loss_functions/MSE.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/loss_functions/speech_recognition.jpg b/docs/images/deep_learning/loss_functions/speech_recognition.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/metrics/Precision_Recall.png b/docs/images/deep_learning/metrics/Precision_Recall.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/metrics/Precision_Recall_curve.png b/docs/images/deep_learning/metrics/Precision_Recall_curve.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/metrics/Precision_Recall_list.png b/docs/images/deep_learning/metrics/Precision_Recall_list.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/attention/attn1_1.png b/docs/images/deep_learning/model_tuning/attention/attn1_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/attention/attn1_2.png b/docs/images/deep_learning/model_tuning/attention/attn1_2.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/attention/attn2_1.png b/docs/images/deep_learning/model_tuning/attention/attn2_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/attention/attn2_2.png b/docs/images/deep_learning/model_tuning/attention/attn2_2.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/attention/attn4_1.png b/docs/images/deep_learning/model_tuning/attention/attn4_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/attention/attn4_2.png b/docs/images/deep_learning/model_tuning/attention/attn4_2.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/attention/attn4_3.png b/docs/images/deep_learning/model_tuning/attention/attn4_3.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/learning_rate.png b/docs/images/deep_learning/model_tuning/learning_rate.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/net_for_params_init.png b/docs/images/deep_learning/model_tuning/net_for_params_init.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/normalization/Comparative_Analysis_of_KNN_Algorithm.png b/docs/images/deep_learning/model_tuning/normalization/Comparative_Analysis_of_KNN_Algorithm.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/normalization/normalization.png b/docs/images/deep_learning/model_tuning/normalization/normalization.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/Dropout.png b/docs/images/deep_learning/model_tuning/regularization/Dropout.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/appropriate_fitting.png b/docs/images/deep_learning/model_tuning/regularization/appropriate_fitting.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/complex_network.jpeg b/docs/images/deep_learning/model_tuning/regularization/complex_network.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/dropconnect.png b/docs/images/deep_learning/model_tuning/regularization/dropconnect.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/dropconnect_inference.png b/docs/images/deep_learning/model_tuning/regularization/dropconnect_inference.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/dropconnect_training.png b/docs/images/deep_learning/model_tuning/regularization/dropconnect_training.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/early_stop.png b/docs/images/deep_learning/model_tuning/regularization/early_stop.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/overfitting.png b/docs/images/deep_learning/model_tuning/regularization/overfitting.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/overfitting_v1.png b/docs/images/deep_learning/model_tuning/regularization/overfitting_v1.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/overfitting_v2.png b/docs/images/deep_learning/model_tuning/regularization/overfitting_v2.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/model_tuning/regularization/underfitting.png b/docs/images/deep_learning/model_tuning/regularization/underfitting.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/normalization/Layer_Normalization/BN_LN_IN_GN.png b/docs/images/deep_learning/normalization/Layer_Normalization/BN_LN_IN_GN.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/optimizers/learning_rate.png b/docs/images/deep_learning/optimizers/learning_rate.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/optimizers/momentum.png b/docs/images/deep_learning/optimizers/momentum.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/optimizers/saddle_point.png b/docs/images/deep_learning/optimizers/saddle_point.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/optimizers/sgd.png b/docs/images/deep_learning/optimizers/sgd.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/optimizers/sgd_momentum.png b/docs/images/deep_learning/optimizers/sgd_momentum.png
old mode 100644
new mode 100755
diff --git a/docs/images/deep_learning/optimizers/sgd_no_momentum.png b/docs/images/deep_learning/optimizers/sgd_no_momentum.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/basic_concept/3.png b/docs/images/generative_adversarial_network/basic_concept/3.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/basic_concept/4_.png b/docs/images/generative_adversarial_network/basic_concept/4_.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/basic_concept/5.png b/docs/images/generative_adversarial_network/basic_concept/5.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/basic_concept/6.png b/docs/images/generative_adversarial_network/basic_concept/6.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/basic_concept/7.png b/docs/images/generative_adversarial_network/basic_concept/7.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/basic_concept/9.png b/docs/images/generative_adversarial_network/basic_concept/9.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/encoder_decoder/decoder-2.png b/docs/images/generative_adversarial_network/encoder_decoder/decoder-2.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/encoder_decoder/encoder-1.png b/docs/images/generative_adversarial_network/encoder_decoder/encoder-1.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/encoder_decoder/face-decoder.png b/docs/images/generative_adversarial_network/encoder_decoder/face-decoder.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/encoder_decoder/face-encoder.png b/docs/images/generative_adversarial_network/encoder_decoder/face-encoder.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/gan_applications/y_1.png b/docs/images/generative_adversarial_network/gan_applications/y_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/gan_applications/y_2.png b/docs/images/generative_adversarial_network/gan_applications/y_2.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/gan_applications/y_3.png b/docs/images/generative_adversarial_network/gan_applications/y_3.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/gan_applications/y_4.png b/docs/images/generative_adversarial_network/gan_applications/y_4.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/gan_applications/y_5.png b/docs/images/generative_adversarial_network/gan_applications/y_5.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/gan_applications/y_6.png b/docs/images/generative_adversarial_network/gan_applications/y_6.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/gan_applications/y_7.png b/docs/images/generative_adversarial_network/gan_applications/y_7.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/gan_applications/y_8.png b/docs/images/generative_adversarial_network/gan_applications/y_8.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/SRdemo.png b/docs/images/generative_adversarial_network/overview/SRdemo.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924121445444.png b/docs/images/generative_adversarial_network/overview/image-20210924121445444.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924162507284.png b/docs/images/generative_adversarial_network/overview/image-20210924162507284.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924162529903.png b/docs/images/generative_adversarial_network/overview/image-20210924162529903.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924163643263.png b/docs/images/generative_adversarial_network/overview/image-20210924163643263.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924182721882.png b/docs/images/generative_adversarial_network/overview/image-20210924182721882.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924184050192.png b/docs/images/generative_adversarial_network/overview/image-20210924184050192.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924184248089.png b/docs/images/generative_adversarial_network/overview/image-20210924184248089.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924200004116.png b/docs/images/generative_adversarial_network/overview/image-20210924200004116.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924201752871.png b/docs/images/generative_adversarial_network/overview/image-20210924201752871.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924201908874.png b/docs/images/generative_adversarial_network/overview/image-20210924201908874.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924201943914.png b/docs/images/generative_adversarial_network/overview/image-20210924201943914.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924210253343.png b/docs/images/generative_adversarial_network/overview/image-20210924210253343.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924210514863.png b/docs/images/generative_adversarial_network/overview/image-20210924210514863.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211205242.png b/docs/images/generative_adversarial_network/overview/image-20210924211205242.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211241513.png b/docs/images/generative_adversarial_network/overview/image-20210924211241513.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211300696.png b/docs/images/generative_adversarial_network/overview/image-20210924211300696.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211326380.png b/docs/images/generative_adversarial_network/overview/image-20210924211326380.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211533198.png b/docs/images/generative_adversarial_network/overview/image-20210924211533198.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211557485.png b/docs/images/generative_adversarial_network/overview/image-20210924211557485.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211618756.png b/docs/images/generative_adversarial_network/overview/image-20210924211618756.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211650061.png b/docs/images/generative_adversarial_network/overview/image-20210924211650061.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211712464.png b/docs/images/generative_adversarial_network/overview/image-20210924211712464.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211738775.png b/docs/images/generative_adversarial_network/overview/image-20210924211738775.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211812360.png b/docs/images/generative_adversarial_network/overview/image-20210924211812360.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211830154.png b/docs/images/generative_adversarial_network/overview/image-20210924211830154.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211848410.png b/docs/images/generative_adversarial_network/overview/image-20210924211848410.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211907600.png b/docs/images/generative_adversarial_network/overview/image-20210924211907600.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211928053.png b/docs/images/generative_adversarial_network/overview/image-20210924211928053.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924211952892.png b/docs/images/generative_adversarial_network/overview/image-20210924211952892.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924212013245.png b/docs/images/generative_adversarial_network/overview/image-20210924212013245.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924212057187.png b/docs/images/generative_adversarial_network/overview/image-20210924212057187.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210924212120899.png b/docs/images/generative_adversarial_network/overview/image-20210924212120899.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210927203906695.png b/docs/images/generative_adversarial_network/overview/image-20210927203906695.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210927203953843.png b/docs/images/generative_adversarial_network/overview/image-20210927203953843.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210927204008524.png b/docs/images/generative_adversarial_network/overview/image-20210927204008524.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210927204023795.png b/docs/images/generative_adversarial_network/overview/image-20210927204023795.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210927204038182.png b/docs/images/generative_adversarial_network/overview/image-20210927204038182.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210929122358733.png b/docs/images/generative_adversarial_network/overview/image-20210929122358733.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/image-20210929122654755.png b/docs/images/generative_adversarial_network/overview/image-20210929122654755.png
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/v2-3cda6e503b5fbef0811a9e7c58d0fb16_720w.jpg b/docs/images/generative_adversarial_network/overview/v2-3cda6e503b5fbef0811a9e7c58d0fb16_720w.jpg
old mode 100644
new mode 100755
diff --git a/docs/images/generative_adversarial_network/overview/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3B5MTg0NDczODk0,size_16,color_FFFFFF,t_70.png b/docs/images/generative_adversarial_network/overview/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3B5MTg0NDczODk0,size_16,color_FFFFFF,t_70.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/metric_based_meta_learning/MN/MN.png b/docs/images/meta_learning/metric_based_meta_learning/MN/MN.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/metric_based_meta_learning/PN/PN.png b/docs/images/meta_learning/metric_based_meta_learning/PN/PN.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/metric_based_meta_learning/RN/RNArchitecture.png b/docs/images/meta_learning/metric_based_meta_learning/RN/RNArchitecture.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/metric_based_meta_learning/RN/RNModel.png b/docs/images/meta_learning/metric_based_meta_learning/RN/RNModel.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/metric_based_meta_learning/SNAIL/SNAIL.png b/docs/images/meta_learning/metric_based_meta_learning/SNAIL/SNAIL.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/metric_based_meta_learning/SNAIL/SNAILBuildingBlocks.png b/docs/images/meta_learning/metric_based_meta_learning/SNAIL/SNAILBuildingBlocks.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/model_based_meta_learning/Learning_to_Learn/LearningToLearnComputationalGraph.png b/docs/images/meta_learning/model_based_meta_learning/Learning_to_Learn/LearningToLearnComputationalGraph.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/model_based_meta_learning/Learning_to_Learn/LearningToLearnLSTMOptimizer.png b/docs/images/meta_learning/model_based_meta_learning/Learning_to_Learn/LearningToLearnLSTMOptimizer.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/model_based_meta_learning/Learning_to_Learn/LearningToLearnOptimizerOptimizee.png b/docs/images/meta_learning/model_based_meta_learning/Learning_to_Learn/LearningToLearnOptimizerOptimizee.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/model_based_meta_learning/Meta_Learner_LSTM/MetaLearnerLSTM.png b/docs/images/meta_learning/model_based_meta_learning/Meta_Learner_LSTM/MetaLearnerLSTM.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/optimization_based_meta_learning/LEO/LEOSchematic.png b/docs/images/meta_learning/optimization_based_meta_learning/LEO/LEOSchematic.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/optimization_based_meta_learning/LEO/LEOStructure.png b/docs/images/meta_learning/optimization_based_meta_learning/LEO/LEOStructure.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/optimization_based_meta_learning/MAML/MAMLSchematicDiagram.png b/docs/images/meta_learning/optimization_based_meta_learning/MAML/MAMLSchematicDiagram.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/preliminaries/BilevelOptimization.png b/docs/images/meta_learning/preliminaries/BilevelOptimization.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/preliminaries/MetaBaseLearner.png b/docs/images/meta_learning/preliminaries/MetaBaseLearner.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/preliminaries/MetaLearningWorkingPrinciple.png b/docs/images/meta_learning/preliminaries/MetaLearningWorkingPrinciple.png
old mode 100644
new mode 100755
diff --git a/docs/images/meta_learning/preliminaries/Task.png b/docs/images/meta_learning/preliminaries/Task.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_compress.png b/docs/images/model_compress/model_compress.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_compress_method.png b/docs/images/model_compress/model_compress_method.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/DistilBERT/parameter_counts.png b/docs/images/model_compress/model_distill/DistilBERT/parameter_counts.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/DistilBERT/result.png b/docs/images/model_compress/model_distill/DistilBERT/result.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/DynaBERT/DynaBERT.png b/docs/images/model_compress/model_distill/DynaBERT/DynaBERT.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/DynaBERT/Network_Rewiring.png b/docs/images/model_compress/model_distill/DynaBERT/Network_Rewiring.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/DynaBERT/comparasion.png b/docs/images/model_compress/model_distill/DynaBERT/comparasion.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/DynaBERT/result_on_glue.png b/docs/images/model_compress/model_distill/DynaBERT/result_on_glue.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/PKD/PKD.png b/docs/images/model_compress/model_distill/PKD/PKD.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/PKD/comparasion_pkd_kd.png b/docs/images/model_compress/model_distill/PKD/comparasion_pkd_kd.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/PKD/last_skip_comparasion.png b/docs/images/model_compress/model_distill/PKD/last_skip_comparasion.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/PKD/parameters.png b/docs/images/model_compress/model_distill/PKD/parameters.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/PKD/result_GLUE.png b/docs/images/model_compress/model_distill/PKD/result_GLUE.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/TinyBERT/TinyBERT_learning.png b/docs/images/model_compress/model_distill/TinyBERT/TinyBERT_learning.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/TinyBERT/Transformer-layer_distillation.png b/docs/images/model_compress/model_distill/TinyBERT/Transformer-layer_distillation.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/model_distill/TinyBERT/result_on_GLUE.png b/docs/images/model_compress/model_distill/TinyBERT/result_on_GLUE.png
old mode 100644
new mode 100755
diff --git a/docs/images/model_compress/performance.png b/docs/images/model_compress/performance.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_1.png b/docs/images/natural_language_processing/SimCSE/sim_cse_1.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_10.png b/docs/images/natural_language_processing/SimCSE/sim_cse_10.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_11.png b/docs/images/natural_language_processing/SimCSE/sim_cse_11.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_12.png b/docs/images/natural_language_processing/SimCSE/sim_cse_12.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_13.png b/docs/images/natural_language_processing/SimCSE/sim_cse_13.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_14.png b/docs/images/natural_language_processing/SimCSE/sim_cse_14.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_2.png b/docs/images/natural_language_processing/SimCSE/sim_cse_2.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_3.png b/docs/images/natural_language_processing/SimCSE/sim_cse_3.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_4.png b/docs/images/natural_language_processing/SimCSE/sim_cse_4.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_5.png b/docs/images/natural_language_processing/SimCSE/sim_cse_5.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_6.png b/docs/images/natural_language_processing/SimCSE/sim_cse_6.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_7.png b/docs/images/natural_language_processing/SimCSE/sim_cse_7.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_8.png b/docs/images/natural_language_processing/SimCSE/sim_cse_8.png
old mode 100644
new mode 100755
diff --git a/docs/images/natural_language_processing/SimCSE/sim_cse_9.png b/docs/images/natural_language_processing/SimCSE/sim_cse_9.png
old mode 100644
new mode 100755
diff --git a/docs/images/paddle.png b/docs/images/paddle.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/CLA.png b/docs/images/pr_procedure/CLA.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/fork1.png b/docs/images/pr_procedure/fork1.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/fork2.png b/docs/images/pr_procedure/fork2.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/git_config.png b/docs/images/pr_procedure/git_config.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/pr.png b/docs/images/pr_procedure/pr.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/pr1.png b/docs/images/pr_procedure/pr1.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/pr2.png b/docs/images/pr_procedure/pr2.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/pr_check1.png b/docs/images/pr_procedure/pr_check1.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/pr_check2.png b/docs/images/pr_procedure/pr_check2.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/repo_address.png b/docs/images/pr_procedure/repo_address.png
old mode 100644
new mode 100755
diff --git a/docs/images/pr_procedure/terminal.png b/docs/images/pr_procedure/terminal.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ELMo/elmo.png b/docs/images/pretrain_model/ELMo/elmo.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ELMo/elmo_finetune.png b/docs/images/pretrain_model/ELMo/elmo_finetune.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ELMo/elmo_v1.jpeg b/docs/images/pretrain_model/ELMo/elmo_v1.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ERNIE3/image-20210909151227754.png b/docs/images/pretrain_model/ERNIE3/image-20210909151227754.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ERNIE3/image-20210909163619484.png b/docs/images/pretrain_model/ERNIE3/image-20210909163619484.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ERNIE3/image-20210909165039097.png b/docs/images/pretrain_model/ERNIE3/image-20210909165039097.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Subword/bpe_compress.gif b/docs/images/pretrain_model/Subword/bpe_compress.gif
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Subword/bpe_shortcoming.png b/docs/images/pretrain_model/Subword/bpe_shortcoming.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Subword/subword-probabilistic-tokenization.png b/docs/images/pretrain_model/Subword/subword-probabilistic-tokenization.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Subword/tokenize.png b/docs/images/pretrain_model/Subword/tokenize.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Transformer/Transformer_architecture.png b/docs/images/pretrain_model/Transformer/Transformer_architecture.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Transformer/decoder.png b/docs/images/pretrain_model/Transformer/decoder.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Transformer/encoder.png b/docs/images/pretrain_model/Transformer/encoder.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Transformer/feed_forward.png b/docs/images/pretrain_model/Transformer/feed_forward.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Transformer/input_embedding.png b/docs/images/pretrain_model/Transformer/input_embedding.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Transformer/linear_softmax.png b/docs/images/pretrain_model/Transformer/linear_softmax.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Transformer/transformer.png b/docs/images/pretrain_model/Transformer/transformer.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/Transformer/transformer_decoding_2.gif b/docs/images/pretrain_model/Transformer/transformer_decoding_2.gif
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/electra/bert_cons.png b/docs/images/pretrain_model/electra/bert_cons.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/electra/electra.png b/docs/images/pretrain_model/electra/electra.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/electra/electra_generator.png b/docs/images/pretrain_model/electra/electra_generator.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/electra/electra_glue.png b/docs/images/pretrain_model/electra/electra_glue.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/electra/electra_training.png b/docs/images/pretrain_model/electra/electra_training.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/electra/experiment1.png b/docs/images/pretrain_model/electra/experiment1.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/electra/experiment2.png b/docs/images/pretrain_model/electra/experiment2.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ernie-doc/image-20210913143559222.png b/docs/images/pretrain_model/ernie-doc/image-20210913143559222.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ernie-doc/image-20210913160057297.png b/docs/images/pretrain_model/ernie-doc/image-20210913160057297.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ernie-doc/image-20210913164545349.png b/docs/images/pretrain_model/ernie-doc/image-20210913164545349.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ernie-gram/image-20210830151839177.png b/docs/images/pretrain_model/ernie-gram/image-20210830151839177.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ernie-gram/image-20210830162209858.png b/docs/images/pretrain_model/ernie-gram/image-20210830162209858.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ernie-gram/image-20210830173032869.png b/docs/images/pretrain_model/ernie-gram/image-20210830173032869.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/ernie-gram/image-20210830180113892.png b/docs/images/pretrain_model/ernie-gram/image-20210830180113892.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/performer/linear_attention.png b/docs/images/pretrain_model/performer/linear_attention.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/performer/mask_attention.gif b/docs/images/pretrain_model/performer/mask_attention.gif
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/performer/performer_exp1.jpeg b/docs/images/pretrain_model/performer/performer_exp1.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/performer/performer_exp2.jpeg b/docs/images/pretrain_model/performer/performer_exp2.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/performer/performer_protein1.jpeg b/docs/images/pretrain_model/performer/performer_protein1.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/performer/performer_protein2.jpeg b/docs/images/pretrain_model/performer/performer_protein2.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/performer/performer_protein3.jpeg b/docs/images/pretrain_model/performer/performer_protein3.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/performer/q_k_v.png b/docs/images/pretrain_model/performer/q_k_v.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/performer/softmax_mask.jpeg b/docs/images/pretrain_model/performer/softmax_mask.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/performer/sparse_attention.jpeg b/docs/images/pretrain_model/performer/sparse_attention.jpeg
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/spanbert/image-20210908110847411.png b/docs/images/pretrain_model/spanbert/image-20210908110847411.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/spanbert/image-20210908112609307.png b/docs/images/pretrain_model/spanbert/image-20210908112609307.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/spanbert/image-20210908112616233.png b/docs/images/pretrain_model/spanbert/image-20210908112616233.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/spanbert/image-20210908114119106.png b/docs/images/pretrain_model/spanbert/image-20210908114119106.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/spanbert/image-20210908152529634.png b/docs/images/pretrain_model/spanbert/image-20210908152529634.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/spanbert/image-20210908153507867.png b/docs/images/pretrain_model/spanbert/image-20210908153507867.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/spanbert/image-20210908153632292.png b/docs/images/pretrain_model/spanbert/image-20210908153632292.png
old mode 100644
new mode 100755
diff --git a/docs/images/pretrain_model/spanbert/image-20210908161433353.png b/docs/images/pretrain_model/spanbert/image-20210908161433353.png
old mode 100644
new mode 100755
diff --git a/docs/images/reinforcement_learning/MDP.png b/docs/images/reinforcement_learning/MDP.png
old mode 100644
new mode 100755
diff --git a/docs/images/reinforcement_learning/different_states.png b/docs/images/reinforcement_learning/different_states.png
old mode 100644
new mode 100755
diff --git a/docs/images/reinforcement_learning/flappy_bird.png b/docs/images/reinforcement_learning/flappy_bird.png
old mode 100644
new mode 100755
diff --git a/docs/images/reinforcement_learning/pacman.png b/docs/images/reinforcement_learning/pacman.png
old mode 100644
new mode 100755
diff --git a/docs/index.rst b/docs/index.rst
old mode 100644
new mode 100755
diff --git a/docs/make.bat b/docs/make.bat
old mode 100644
new mode 100755
diff --git a/docs/requirements.txt b/docs/requirements.txt
old mode 100644
new mode 100755
diff --git a/docs/tutorials/CNN/CV_CNN.md b/docs/tutorials/CNN/CV_CNN.md
old mode 100644
new mode 100755
index d9172749a..c7d375581
--- a/docs/tutorials/CNN/CV_CNN.md
+++ b/docs/tutorials/CNN/CV_CNN.md
@@ -68,8 +68,3 @@ typora-root-url: ../CNN
- 激活函数:激活函数给神经元引入了非线性因素,对输入信息进行非线性变换,从而使得神经网络可以任意逼近任何非线性函数,然后将变换后的输出信息作为输入信息传给下一层神经元。
- 全连接层:全连接层用于对卷积神经网络提取到的特征进行汇总,将多维的特征映射为二维的输出。其中,高维代表样本批次大小,低维代表分类或回归结果。
-
-
-
-
-
diff --git a/docs/tutorials/CNN/ParamsCounter.md b/docs/tutorials/CNN/ParamsCounter.md
old mode 100644
new mode 100755
index dc964dccf..f8d51c523
--- a/docs/tutorials/CNN/ParamsCounter.md
+++ b/docs/tutorials/CNN/ParamsCounter.md
@@ -40,13 +40,13 @@ class TestNet(nn.Layer):
def forward(self, x):
x = self.conv2d(x)
return x
-
+
if __name__ == "__main__":
net = TestNet()
paddle.flops(net, input_size=[1, 2, 320, 320])
-
-
+
+
Total GFlops: 0.00778 Total Params: 76.00
```
API得出的参数量为76,GFLOPs为0.00778,这里的GFLOPs就是FLOPs的10$^9$倍,我们的参数量求得的也是76,那么FLOPs呢?我们来算一下,输入的尺寸为320 * 320, 卷积核为3 * 3, 且padding为1,那么图片输入的大小和输出的大小一致,即输出也是320 * 320, 那么根据我们的公式可得: $76 * 320 * 320 = 7782400$, 与API的一致!因此大家计算卷积层的参数和FLOPs的时候就可以用上面的公式。
@@ -138,7 +138,7 @@ features[1] 参数量和FLOPs均为0
features[2] 参数量和FLOPs均为0, 输出尺寸变为14 * 14
-features[3] 参数量:$ 16 * 6 * 5 * 5 + 16 = 2416$, FLOPs : $ 2416 * 10 * 10 = 241600$, 需要注意的是,这个卷积没有padding,所以输出特征图大小变为 10 * 10
+features[3] 参数量:$ 16 * 6 * 5 * 5 + 16 = 2416$, FLOPs : $ 2416 * 10 * 10 = 241600$, 需要注意的是,这个卷积没有padding,所以输出特征图大小变为 10 * 10
features[4] 参数量和FLOPs均为0
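
For quick sanity checks away from the framework, the two counting rules this section applies to every layer can be written out directly. A minimal sketch, assuming the document's convention that a conv layer's FLOPs equal its parameter count times the output resolution (the function names are ours):

```python
# Hedged sketch: parameter and FLOP counting for a standard Conv2D layer,
# following this tutorial's convention (FLOPs = params * H_out * W_out).

def conv2d_params(c_in, c_out, k_h, k_w, bias=True):
    # one k_h x k_w filter per (input, output) channel pair, plus biases
    return c_out * c_in * k_h * k_w + (c_out if bias else 0)

def conv2d_flops(c_in, c_out, k_h, k_w, h_out, w_out, bias=True):
    # every output pixel reuses the full parameter set once
    return conv2d_params(c_in, c_out, k_h, k_w, bias) * h_out * w_out

# The TestNet example above: Conv2D(2 -> 4, 3x3, padding=1) on a 320x320 input.
print(conv2d_params(2, 4, 3, 3))           # 76
print(conv2d_flops(2, 4, 3, 3, 320, 320))  # 7782400, i.e. ~0.00778 GFLOPs
```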
diff --git a/docs/tutorials/CNN/Pooling.md b/docs/tutorials/CNN/Pooling.md
old mode 100644
new mode 100755
index e06d5abc2..934c5f5f9
--- a/docs/tutorials/CNN/Pooling.md
+++ b/docs/tutorials/CNN/Pooling.md
@@ -126,7 +126,7 @@ print('result:', 'shape of x:', x.shape, 'shape of result:', y.shape)
result: shape of x: [1, 1, 6, 6] shape of result: [1, 1, 6, 6]
```
这个呢,就和我们上面说的一致。下面来看看VALID填充方式吧。
-```python
+```python
import paddle # Padding VALID
x = paddle.rand((1, 1, 6, 6))
avgpool = paddle.nn.AvgPool2D(kernel_size=2, padding='VALID')
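
The shape arithmetic behind the two padding modes can be checked without running the pooling op at all. A minimal sketch, assuming the TF-style semantics that paddle's 'SAME'/'VALID' strings follow, and that the SAME example above used stride 1 while the VALID pool's stride defaults to the kernel size:

```python
import math

def pool_out_size(in_size, kernel, stride, padding):
    if padding == 'SAME':   # pad so that out = ceil(in / stride)
        return math.ceil(in_size / stride)
    if padding == 'VALID':  # no padding: windows must fit entirely inside
        return (in_size - kernel) // stride + 1
    raise ValueError(padding)

print(pool_out_size(6, kernel=2, stride=1, padding='SAME'))   # 6, as printed above
print(pool_out_size(6, kernel=2, stride=2, padding='VALID'))  # 3
```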
diff --git a/docs/tutorials/CNN/convolution_operator/1_Convolution.md b/docs/tutorials/CNN/convolution_operator/1_Convolution.md
old mode 100644
new mode 100755
index 199895d0c..04e506b24
--- a/docs/tutorials/CNN/convolution_operator/1_Convolution.md
+++ b/docs/tutorials/CNN/convolution_operator/1_Convolution.md
@@ -60,11 +60,11 @@ class Inception(paddle.nn.Layer):
def __init__(self, c0, c1, c2, c3, c4, **kwargs):
'''
Inception模块的实现代码,
-
+
c1,图(b)中第一条支路1x1卷积的输出通道数,数据类型是整数
- c2,图(b)中第二条支路卷积的输出通道数,数据类型是tuple或list,
+ c2,图(b)中第二条支路卷积的输出通道数,数据类型是tuple或list,
其中c2[0]是1x1卷积的输出通道数,c2[1]是3x3
- c3,图(b)中第三条支路卷积的输出通道数,数据类型是tuple或list,
+ c3,图(b)中第三条支路卷积的输出通道数,数据类型是tuple或list,
其中c3[0]是1x1卷积的输出通道数,c3[1]是3x3
c4,图(b)中第一条支路1x1卷积的输出通道数,数据类型是整数
'''
@@ -134,7 +134,7 @@ class ConvBNLayer(paddle.nn.Layer):
stride=1,
groups=1,
act=None):
-
+
"""
num_channels, 卷积层的输入通道数
num_filters, 卷积层的输出通道数
@@ -155,7 +155,7 @@ class ConvBNLayer(paddle.nn.Layer):
# 创建BatchNorm层
self._batch_norm = paddle.nn.BatchNorm2D(num_filters)
-
+
self.act = act
def forward(self, inputs):
@@ -232,4 +232,3 @@ class BottleneckBlock(paddle.nn.Layer):
[1] [Going deeper with convolutions](https://arxiv.org/pdf/1409.4842.pdf)
[2] [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385.pdf)
-
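
Since the four Inception branches run in parallel and are concatenated along the channel axis, the block's output width is simply the sum of each branch's final conv width. A small illustrative check (the channel numbers are the well-known GoogLeNet "3a" settings, not values from this file):

```python
def inception_out_channels(c1, c2, c3, c4):
    # c2 and c3 are (1x1-reduce, conv) tuples; only each branch's last
    # conv contributes channels to the concatenated output
    return c1 + c2[1] + c3[1] + c4

print(inception_out_channels(64, (96, 128), (16, 32), 32))  # 256
```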
diff --git a/docs/tutorials/CNN/convolution_operator/3D_Convolution.md b/docs/tutorials/CNN/convolution_operator/3D_Convolution.md
old mode 100644
new mode 100755
index 2ca0730f8..dfc8363f6
--- a/docs/tutorials/CNN/convolution_operator/3D_Convolution.md
+++ b/docs/tutorials/CNN/convolution_operator/3D_Convolution.md
@@ -43,4 +43,3 @@
[1] [3D Convolutional Neural Networks for Human Action Recognition](http://users.eecs.northwestern.edu/~mya671/mypapers/ICML10_Ji_Xu_Yang_Yu.pdf)
[2] [3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation](https://arxiv.org/abs/1606.06650)
-
diff --git a/docs/tutorials/CNN/convolution_operator/Convolution.md b/docs/tutorials/CNN/convolution_operator/Convolution.md
old mode 100644
new mode 100755
index d670d0682..673c54851
--- a/docs/tutorials/CNN/convolution_operator/Convolution.md
+++ b/docs/tutorials/CNN/convolution_operator/Convolution.md
@@ -20,7 +20,7 @@
为了解决上述问题,引入卷积(Convolution)来对输入的图像进行特征提取。卷积的计算范围是在像素点的空间邻域内进行的,因此可以利用输入图像的空间信息;此外,由于卷积具有局部连接、权重共享等特性,卷积核参数的数目也远小于全连接层。
-## 二、卷积核 / 特征图 / 卷积计算
+## 二、卷积核 / 特征图 / 卷积计算
**卷积核(kernel)**:也被叫做滤波器(filter),假设卷积核的高和宽分别为$k_h$和$k_w$,则将称为$k_h\times k_w$卷积,比如$3\times5$卷积,就是指卷积核的高为3, 宽为5。卷积核中数值为对图像中与卷积核同样大小的子块像素点进行卷积计算时所采用的权重。
@@ -365,10 +365,10 @@ w = w.reshape([1, 1, 3, 3])
w = np.repeat(w, 3, axis=1)
# 创建卷积算子,输出通道数为1,卷积核大小为3x3,
# 并使用上面的设置好的数值作为卷积核权重的初始化参数
-conv = Conv2D(in_channels=3, out_channels=1, kernel_size=[3, 3],
+conv = Conv2D(in_channels=3, out_channels=1, kernel_size=[3, 3],
weight_attr=paddle.ParamAttr(
initializer=Assign(value=w)))
-
+
# 将读入的图片转化为float32类型的numpy.ndarray
x = np.array(img).astype('float32')
# 图片读入成ndarry时,形状是[H, W, 3],
@@ -420,7 +420,7 @@ img = np.array(img)
# 创建初始化参数
w = np.ones([1, 1, 5, 5], dtype = 'float32')/25
-conv = Conv2D(in_channels=1, out_channels=1, kernel_size=[5, 5],
+conv = Conv2D(in_channels=1, out_channels=1, kernel_size=[5, 5],
weight_attr=paddle.ParamAttr(
initializer=Assign(value=w)))
x = img.astype('float32')
@@ -449,4 +449,3 @@ plt.show()
## 参考文献
[1] [Visualizing and Understanding Convolutional Networks](https://arxiv.org/pdf/1311.2901.pdf)
-
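
The 5x5 mean filter used above is easy to reproduce without a framework, which makes the smoothing effect concrete. A minimal numpy sketch, assuming 'valid' convolution (no padding):

```python
import numpy as np

def mean_filter(img, k=5):
    # each output pixel is the average of the k x k patch under the window
    kernel = np.ones((k, k), dtype=np.float32) / (k * k)
    h, w = img.shape
    out = np.zeros((h - k + 1, w - k + 1), dtype=np.float32)
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = np.sum(img[i:i + k, j:j + k] * kernel)
    return out

noisy = np.random.rand(32, 32).astype(np.float32)
print(mean_filter(noisy).shape)  # (28, 28) -- valid conv shrinks by k-1
```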
diff --git a/docs/tutorials/CNN/convolution_operator/Deformable_Convolution.md b/docs/tutorials/CNN/convolution_operator/Deformable_Convolution.md
old mode 100644
new mode 100755
index 409f995c5..23279589e
--- a/docs/tutorials/CNN/convolution_operator/Deformable_Convolution.md
+++ b/docs/tutorials/CNN/convolution_operator/Deformable_Convolution.md
@@ -176,4 +176,4 @@ print(out.shape)
>
> [4] https://www.zhihu.com/question/303900394/answer/540818451
>
-> [5] https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/vision/ops/deform_conv2d_cn.html
\ No newline at end of file
+> [5] https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/vision/ops/deform_conv2d_cn.html
diff --git a/docs/tutorials/CNN/convolution_operator/Dilated_Convolution.md b/docs/tutorials/CNN/convolution_operator/Dilated_Convolution.md
old mode 100644
new mode 100755
index 2b8f11ee2..e5d8dd1ce
--- a/docs/tutorials/CNN/convolution_operator/Dilated_Convolution.md
+++ b/docs/tutorials/CNN/convolution_operator/Dilated_Convolution.md
@@ -75,4 +75,3 @@ $$F = k + (k-1)(r-1)$$
[7 ] [Receptive Field Block Net for Accurate and Fast Object Detection](https://arxiv.org/pdf/1711.07767.pdf)
[8] [WaveNet: a generative model for raw audio](https://arxiv.org/pdf/1609.03499.pdf)
-
diff --git a/docs/tutorials/CNN/convolution_operator/Group_Convolution.md b/docs/tutorials/CNN/convolution_operator/Group_Convolution.md
old mode 100644
new mode 100755
index 6f42f7b6f..a3764b313
--- a/docs/tutorials/CNN/convolution_operator/Group_Convolution.md
+++ b/docs/tutorials/CNN/convolution_operator/Group_Convolution.md
@@ -46,4 +46,3 @@ $$
## 参考文献
[1] [ImageNet Classification with Deep Convolutional Neural Networks](http://stanford.edu/class/cs231m/references/alexnet.pdf)
-
diff --git a/docs/tutorials/CNN/convolution_operator/Separable_Convolution.md b/docs/tutorials/CNN/convolution_operator/Separable_Convolution.md
old mode 100644
new mode 100755
index 270a05346..6fd138b1b
--- a/docs/tutorials/CNN/convolution_operator/Separable_Convolution.md
+++ b/docs/tutorials/CNN/convolution_operator/Separable_Convolution.md
@@ -19,12 +19,12 @@ $$
4 & 8 & 12 \\
5 & 10 & 15
\end{array}\right]
-=
+=
\left[\begin{array}{ccc}
3 \\
4 \\
5
-\end{array}\right]
+\end{array}\right]
\times
\left[\begin{array}{ccc}
1 \quad 2 \quad 3
@@ -52,12 +52,12 @@ $$
-2 & 0 & 2 \\
-1 & 0 & 1
\end{array}\right]
-=
+=
\left[\begin{array}{ccc}
1 \\
2 \\
1
-\end{array}\right]
+\end{array}\right]
\times
\left[\begin{array}{ccc}
-1 \quad 0 \quad 1
@@ -151,4 +151,3 @@ MobileNetv1[1]中使用的深度可分离卷积如 **图7** 右侧所
### 参考文献
[1] [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/pdf/1704.04861.pdf)
-
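
The rank-1 factorizations in the two equations above can be verified in one line: a spatially separable kernel is exactly the outer product of its column and row factors, which is why one 3x3 convolution (9 multiplies per pixel) can be replaced by a 3x1 followed by a 1x3 (6 multiplies). A quick numpy check:

```python
import numpy as np

col = np.array([[1], [2], [1]])   # vertical smoothing factor
row = np.array([[-1, 0, 1]])      # horizontal gradient factor
print(col @ row)                  # reproduces the Sobel kernel above:
# [[-1  0  1]
#  [-2  0  2]
#  [-1  0  1]]
```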
diff --git a/docs/tutorials/CNN/convolution_operator/Transpose_Convolution.md b/docs/tutorials/CNN/convolution_operator/Transpose_Convolution.md
old mode 100644
new mode 100755
index ede08a379..d122c3530
--- a/docs/tutorials/CNN/convolution_operator/Transpose_Convolution.md
+++ b/docs/tutorials/CNN/convolution_operator/Transpose_Convolution.md
@@ -52,7 +52,7 @@ input=\left[\begin{array}{ccc}
x_1 & x_2 & x_3 & x_4 \\
x_6 & x_7 & x_8 & x_9 \\
x_{10} & x_{11} & x_{12} & x_{13} \\
-x_{14} & x_{15} & x_{16} & x_{17}
+x_{14} & x_{15} & x_{16} & x_{17}
\end{array}\right]
$$
一个尺寸为$3\times{3}$ 的标准卷积核 $kernel$:
@@ -84,7 +84,7 @@ X=\left[\begin{array}{ccc}
x_1 \\ x_2 \\ x_3 \\ x_4 \\
x_6 \\ x_7 \\ x_8 \\ x_9 \\
x_{10} \\ x_{11} \\ x_{12} \\ x_{13} \\
-x_{14} \\ x_{15} \\ x_{16} \\ x_{17}
+x_{14} \\ x_{15} \\ x_{16} \\ x_{17}
\end{array}\right]
$$
@@ -254,4 +254,3 @@ $$
[3] [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/pdf/1505.04597.pdf)
[4] [Visualizing and Understanding Convolutional Networks](https://arxiv.org/pdf/1311.2901v2.pdf)
-
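
The unrolled-matrix view above is worth making executable: with a 4x4 input, a 3x3 kernel, stride 1 and no padding, the convolution is multiplication by a sparse 4x16 matrix C, and the transposed convolution is simply multiplication by C^T, mapping the 4 outputs back to 16 positions. A minimal numpy sketch under those assumptions:

```python
import numpy as np

def conv_as_matrix(kernel, in_size=4):
    # build the sparse matrix C so that y = C @ x.flatten() equals the
    # valid convolution of the in_size x in_size input with the kernel
    k = kernel.shape[0]
    out = in_size - k + 1
    C = np.zeros((out * out, in_size * in_size))
    for oi in range(out):
        for oj in range(out):
            for ki in range(k):
                for kj in range(k):
                    C[oi * out + oj, (oi + ki) * in_size + (oj + kj)] = kernel[ki, kj]
    return C

C = conv_as_matrix(np.arange(1, 10, dtype=float).reshape(3, 3))
x = np.random.rand(16)
y = C @ x          # forward conv: 16 -> 4
x_up = C.T @ y     # transposed conv: 4 -> 16 (shape, not values, is restored)
print(C.shape, y.shape, x_up.shape)  # (4, 16) (4,) (16,)
```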
diff --git a/docs/tutorials/CNN/convolution_operator/index.rst b/docs/tutorials/CNN/convolution_operator/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/CNN/index.rst b/docs/tutorials/CNN/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/OCR/OCR.md b/docs/tutorials/computer_vision/OCR/OCR.md
old mode 100644
new mode 100755
index 0859fff90..d267c6248
--- a/docs/tutorials/computer_vision/OCR/OCR.md
+++ b/docs/tutorials/computer_vision/OCR/OCR.md
@@ -55,7 +55,7 @@ OCR文字检测就是将图片中的文字区域检测出来。
#### 2) 像素值回归
-采用像素值回归的方法主要有CRAFT和SA-Text,这类算法能够检测弯曲文本且对小文本效果优秀但是实时性能不够。
+采用像素值回归的方法主要有CRAFT和SA-Text,这类算法能够检测弯曲文本,且对小文本效果优秀,但是实时性能不够。
### 基于分割的算法
@@ -97,6 +97,3 @@ OCR文字检测就是将图片中的文字区域检测出来。
对应到OCR技术实现问题上,则一般面临仿射变换、尺度问题、光照不足、拍摄模糊等技术难点; 另外OCR应用常对接海量数据,因此要求数据能够得到实时处理;并且OCR应用常部署在移动端或嵌入式硬件,而端侧的存储空间和计算能力有限,因此对OCR模型的大小和预测速度有很高的要求。

-
-
-
diff --git a/docs/tutorials/computer_vision/OCR/OCR_Detection/CTPN.md b/docs/tutorials/computer_vision/OCR/OCR_Detection/CTPN.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/OCR/OCR_Detection/DBNet.md b/docs/tutorials/computer_vision/OCR/OCR_Detection/DBNet.md
old mode 100644
new mode 100755
index 493481ef3..1d724d850
--- a/docs/tutorials/computer_vision/OCR/OCR_Detection/DBNet.md
+++ b/docs/tutorials/computer_vision/OCR/OCR_Detection/DBNet.md
@@ -62,7 +62,7 @@ $$\hat{B} = \frac{1}{1 + e^{-k(P_{i,j}-T_{i,j})}}$$
标准二值化和可微二值化的对比图如 **图3(a)** 所示。
-之所以DB方法会改善算法性能,我们可以在反向传播时梯度的计算上进行观察。当使用交叉熵损失时,正负样本的loss分别为 $l_+$ 和 $l_-$
+之所以DB方法会改善算法性能,我们可以在反向传播时梯度的计算上进行观察。当使用交叉熵损失时,正负样本的loss分别为 $l_+$ 和 $l_-$
$$ l_+ = -log(\frac{1}{1 + e^{-k(P_{i,j}-T_{i,j})}})$$
@@ -111,4 +111,3 @@ $dice\_loss = 1 - \frac{2 \times intersection\_area}{total\_area}$
## 参考文献
[1] [Real-time Scene Text Detection with Differentiable Binarization](https://arxiv.org/pdf/1911.08947.pdf)
-
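
The differentiable binarization formula above is a steep sigmoid around the learned threshold map, and plugging in numbers shows how it approximates a hard step while keeping gradients alive. A minimal numpy sketch (k = 50 is the amplification factor reported in the paper):

```python
import numpy as np

def db(P, T, k=50.0):
    # approximate binarization: near 0/1 away from the threshold T,
    # smooth (hence differentiable) in a narrow band around it
    return 1.0 / (1.0 + np.exp(-k * (P - T)))

P = np.array([0.20, 0.49, 0.51, 0.90])   # probability map samples
T = np.full_like(P, 0.5)                 # threshold map samples
print(db(P, T))                          # ~[0.00, 0.38, 0.62, 1.00]
```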
diff --git a/docs/tutorials/computer_vision/OCR/OCR_Detection/EAST.md b/docs/tutorials/computer_vision/OCR/OCR_Detection/EAST.md
old mode 100644
new mode 100755
index 8607748b8..add36a88d
--- a/docs/tutorials/computer_vision/OCR/OCR_Detection/EAST.md
+++ b/docs/tutorials/computer_vision/OCR/OCR_Detection/EAST.md
@@ -92,4 +92,3 @@ $$N_{Q^*} = \min\limits_{i=1}^{4} D(p_i, p_{(i \bmod 4)+1})$$
## 参考文献
[1] [EAST: An Efficient and Accurate Scene Text Detector](https://arxiv.org/pdf/1704.03155.pdf)
-
diff --git a/docs/tutorials/computer_vision/OCR/OCR_Detection/index.rst b/docs/tutorials/computer_vision/OCR/OCR_Detection/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/OCR/OCR_Recognition/CRNN.md b/docs/tutorials/computer_vision/OCR/OCR_Recognition/CRNN.md
old mode 100644
new mode 100755
index d92c4270f..ac7e29b83
--- a/docs/tutorials/computer_vision/OCR/OCR_Recognition/CRNN.md
+++ b/docs/tutorials/computer_vision/OCR/OCR_Recognition/CRNN.md
@@ -43,4 +43,3 @@ CRNN的主要结构包括基于CNN的图像特征提取模块以及基于多层
## 参考文献
[1] [An End-to-End Trainable Neural Network for Image-based SequenceRecognition and Its Application to Scene Text Recognition](https://arxiv.org/pdf/1507.05717v1.pdf)
-
diff --git a/docs/tutorials/computer_vision/OCR/OCR_Recognition/index.rst b/docs/tutorials/computer_vision/OCR/OCR_Recognition/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/OCR/index.rst b/docs/tutorials/computer_vision/OCR/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/classification/AlexNet.md b/docs/tutorials/computer_vision/classification/AlexNet.md
old mode 100644
new mode 100755
index 63fe19ffb..5ea0e9a61
--- a/docs/tutorials/computer_vision/classification/AlexNet.md
+++ b/docs/tutorials/computer_vision/classification/AlexNet.md
@@ -4,7 +4,7 @@
AlexNet[1]是2012年ImageNet竞赛的冠军模型,其作者是神经网络领域三巨头之一的Hinton和他的学生Alex Krizhevsky。
-AlexNet以极大的优势领先2012年ImageNet竞赛的第二名,也因此给当时的学术界和工业界带来了很大的冲击。此后,更多更深的神经网络相继被提出,比如优秀的VGG,GoogLeNet,ResNet等。
+AlexNet以极大的优势领先2012年ImageNet竞赛的第二名,也因此给当时的学术界和工业界带来了很大的冲击。此后,更多更深的神经网络相继被提出,比如优秀的VGG,GoogLeNet,ResNet等。
## 模型结构
@@ -63,7 +63,7 @@ class AlexNet(paddle.nn.Layer):
self.drop_ratio2 = 0.5
self.drop2 = Dropout(self.drop_ratio2)
self.fc3 = Linear(in_features=4096, out_features=num_classes)
-
+
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
@@ -116,4 +116,3 @@ AlexNet 作为 ImageNet 2012比赛的冠军算法,在 ImageNet 测试集上达
## 参考文献
[1] [Imagenet classification with deep convolutional neural networks. ](https://www.nvidia.cn/content/tesla/pdf/machine-learning/imagenet-classification-with-deep-convolutional-nn.pdf)
-
diff --git a/docs/tutorials/computer_vision/classification/DarkNet.md b/docs/tutorials/computer_vision/classification/DarkNet.md
old mode 100644
new mode 100755
index 37e647b05..f7c84cc13
--- a/docs/tutorials/computer_vision/classification/DarkNet.md
+++ b/docs/tutorials/computer_vision/classification/DarkNet.md
@@ -49,9 +49,9 @@ class ConvBNLayer(nn.Layer):
stride,
padding,
name=None):
- # 初始化函数
+ # 初始化函数
super(ConvBNLayer, self).__init__()
- # 创建卷积层
+ # 创建卷积层
self._conv = Conv2D(
in_channels=input_channels,
out_channels=output_channels,
@@ -60,7 +60,7 @@ class ConvBNLayer(nn.Layer):
padding=padding,
weight_attr=ParamAttr(name=name + ".conv.weights"),
bias_attr=False)
- # 创建批归一化层
+ # 创建批归一化层
bn_name = name + ".bn"
self._bn = BatchNorm(
num_channels=output_channels,
@@ -71,7 +71,7 @@ class ConvBNLayer(nn.Layer):
moving_variance_name=bn_name + ".var")
def forward(self, inputs):
- # 前向计算
+ # 前向计算
x = self._conv(inputs)
x = self._bn(x)
return x
@@ -79,16 +79,16 @@ class ConvBNLayer(nn.Layer):
# 定义残差块
class BasicBlock(nn.Layer):
def __init__(self, input_channels, output_channels, name=None):
- # 初始化函数
+ # 初始化函数
super(BasicBlock, self).__init__()
- # 定义两个卷积层
+ # 定义两个卷积层
self._conv1 = ConvBNLayer(
input_channels, output_channels, 1, 1, 0, name=name + ".0")
self._conv2 = ConvBNLayer(
output_channels, output_channels * 2, 3, 1, 1, name=name + ".1")
def forward(self, inputs):
- # 前向计算
+ # 前向计算
x = self._conv1(inputs)
x = self._conv2(x)
# 将第二个卷积层的输出和最初的输入值相加
@@ -97,16 +97,16 @@ class BasicBlock(nn.Layer):
class DarkNet53(nn.Layer):
def __init__(self, class_dim=1000):
- # 初始化函数
+ # 初始化函数
super(DarkNet, self).__init__()
- # DarkNet 每组残差块的个数,来自DarkNet的网络结构图
+ # DarkNet 每组残差块的个数,来自DarkNet的网络结构图
self.stages = [1, 2, 8, 8, 4]
# 第一层卷积
self._conv1 = ConvBNLayer(3, 32, 3, 1, 1, name="yolo_input")
# 下采样,使用stride=2的卷积来实现
self._conv2 = ConvBNLayer(
32, 64, 3, 2, 1, name="yolo_input.downsample")
- # 添加各个层级的实现
+ # 添加各个层级的实现
self._basic_block_01 = BasicBlock(64, 32, name="stage.0.0")
# 下采样,使用stride=2的卷积来实现
self._downsample_0 = ConvBNLayer(
@@ -146,7 +146,7 @@ class DarkNet53(nn.Layer):
self._basic_block_42 = BasicBlock(1024, 512, name="stage.4.1")
self._basic_block_43 = BasicBlock(1024, 512, name="stage.4.2")
self._basic_block_44 = BasicBlock(1024, 512, name="stage.4.3")
- # 自适应平均池化
+ # 自适应平均池化
self._pool = AdaptiveAvgPool2D(1)
stdv = 1.0 / math.sqrt(1024.0)
@@ -219,4 +219,3 @@ class DarkNet53(nn.Layer):
[1] [YOLO9000: Better, Faster, Stronger](https://arxiv.org/abs/1612.08242)
[2] [YOLOv3: An Incremental Improvement](https://pjreddie.com/media/files/papers/YOLOv3.pdf)
-
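
As the code above shows, DarkNet-53 keeps resolution in its first conv and then halves it with each of the five stride-2 downsampling convs, for an overall 32x reduction. A small sketch of that shape arithmetic (416 is a common YOLO input size, used here only for illustration):

```python
def conv_out(in_size, kernel=3, stride=2, padding=1):
    # standard conv output-size formula
    return (in_size + 2 * padding - kernel) // stride + 1

size = 416
for stage in range(5):
    size = conv_out(size)
    print(f"after downsample {stage}: {size}")
# 208, 104, 52, 26, 13 -> 416/32 = 13 at the last stage
```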
diff --git a/docs/tutorials/computer_vision/classification/GoogLeNet.md b/docs/tutorials/computer_vision/classification/GoogLeNet.md
old mode 100644
new mode 100755
index 48e315886..e1343e702
--- a/docs/tutorials/computer_vision/classification/GoogLeNet.md
+++ b/docs/tutorials/computer_vision/classification/GoogLeNet.md
@@ -49,11 +49,11 @@ class Inception(paddle.nn.Layer):
def __init__(self, c0, c1, c2, c3, c4, **kwargs):
'''
Inception模块的实现代码,
-
+
c1,图(b)中第一条支路1x1卷积的输出通道数,数据类型是整数
- c2,图(b)中第二条支路卷积的输出通道数,数据类型是tuple或list,
+ c2,图(b)中第二条支路卷积的输出通道数,数据类型是tuple或list,
其中c2[0]是1x1卷积的输出通道数,c2[1]是3x3
- c3,图(b)中第三条支路卷积的输出通道数,数据类型是tuple或list,
+ c3,图(b)中第三条支路卷积的输出通道数,数据类型是tuple或list,
其中c3[0]是1x1卷积的输出通道数,c3[1]是3x3
c4,图(b)中第一条支路1x1卷积的输出通道数,数据类型是整数
'''
@@ -160,11 +160,11 @@ class Inception(nn.Layer):
name=None):
'''
Inception模块的实现代码,
-
+
c1,图(b)中第一条支路1x1卷积的输出通道数,数据类型是整数
- c2,图(b)中第二条支路卷积的输出通道数,数据类型是tuple或list,
+ c2,图(b)中第二条支路卷积的输出通道数,数据类型是tuple或list,
其中c2[0]是1x1卷积的输出通道数,c2[1]是3x3
- c3,图(b)中第三条支路卷积的输出通道数,数据类型是tuple或list,
+ c3,图(b)中第三条支路卷积的输出通道数,数据类型是tuple或list,
其中c3[0]是1x1卷积的输出通道数,c3[1]是3x3
c4,图(b)中第一条支路1x1卷积的输出通道数,数据类型是整数
'''
@@ -314,4 +314,4 @@ GoogLeNet 在 2014 年的 ImageNet 比赛上取得了冠军的好成绩,具体
## 参考文献
-[1] [Going deeper with convolutions.](https://arxiv.org/abs/1409.4842)
\ No newline at end of file
+[1] [Going deeper with convolutions.](https://arxiv.org/abs/1409.4842)
diff --git a/docs/tutorials/computer_vision/classification/LeNet.md b/docs/tutorials/computer_vision/classification/LeNet.md
old mode 100644
new mode 100755
index d94e20ca1..6b10a80b2
--- a/docs/tutorials/computer_vision/classification/LeNet.md
+++ b/docs/tutorials/computer_vision/classification/LeNet.md
@@ -103,4 +103,4 @@ LeNet-5在MNIST手写数字识别任务上进行了模型训练与测试,论
## 参考文献
-[1] [Gradient-based learn- ing applied to document recognition.](http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf)
\ No newline at end of file
+[1] [Gradient-based learning applied to document recognition.](http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf)
diff --git a/docs/tutorials/computer_vision/classification/Res2Net.md b/docs/tutorials/computer_vision/classification/Res2Net.md
old mode 100644
new mode 100755
index 4655baeb4..fb8e8178a
--- a/docs/tutorials/computer_vision/classification/Res2Net.md
+++ b/docs/tutorials/computer_vision/classification/Res2Net.md
@@ -1,184 +1,179 @@
-# Res2Net
-
-## 1. 模型介绍
-2020年,南开大学程明明组提出了一种面向目标检测任务的新模块Res2Net。并且其论文已被TPAMI2020录用。Res2Net和ResNeXt一样,是ResNet的变体形式,只不过Res2Net不止提高了分类任务的准确率,还提高了检测任务的精度。Res2Net的新模块可以和现有其他优秀模块轻松整合,在不增加计算负载量的情况下,在ImageNet、CIFAR-100等数据集上的测试性能超过了ResNet。因为模型的残差块里又有残差连接,所以取名为Res2Net。
-
-## 2. 模型结构
-
-
-模型结构看起来很简单,将输入的特征x,split为k个特征,第i+1(i = 0, 1, 2,...,k-1) 个特征经过3×3卷积后以残差连接的方式融合到第 i+2 个特征中。这就是Res2Net的主要结构。那么这样做的目的是为什么呢?能够有什么好处呢?
-答案就是多尺度卷积。多尺度特征在检测任务中一直是很重要的,自从空洞卷积提出以来,基于空洞卷积搭建的多尺度金字塔模型在检测任务上取得里程碑式的效果。不同感受野下获取的物体的信息是不同的,小的感受野可能会看到更多的物体细节,对于检测小目标也有很大的好处,而大的感受野可以感受物体的整体结构,方便网络定位物体的位置,细节与位置的结合可以更好地得到具有清晰边界的物体信息,因此,结合了多尺度金字塔的模型往往能获得很好地效果。在Res2Net中,特征k2经过3×3卷积后被送入x3所在的处理流中,k2再次被3×3的卷积优化信息,两个3×3的卷积相当于一个5×5的卷积。那么,k3就想当然与融合了3×3的感受野和5×5的感受野处理后的特征。以此类推,7×7的感受野被应用在k4中。就这样,Res2Net提取多尺度特征用于检测任务,以提高模型的准确率。在这篇论文中,s是比例尺寸的控制参数,也就是可以将输入通道数平均等分为多个特征通道。s越大表明多尺度能力越强,此外一些额外的计算开销也可以忽略。
-
-
-## 3. 模型实现
-Res2Net与ResNet的模型结构一致,主要差别在于block的搭建,因此这里用paddle框架来实现block的代码
-```python
-class ConvBNLayer(nn.Layer):
- def __init__(
- self,
- num_channels,
- num_filters,
- filter_size,
- stride=1,
- groups=1,
- is_vd_mode=False,
- act=None,
- name=None, ):
- super(ConvBNLayer, self).__init__()
-
- self.is_vd_mode = is_vd_mode
- self._pool2d_avg = AvgPool2D(
- kernel_size=2, stride=2, padding=0, ceil_mode=True)
- self._conv = Conv2D(
- in_channels=num_channels,
- out_channels=num_filters,
- kernel_size=filter_size,
- stride=stride,
- padding=(filter_size - 1) // 2,
- groups=groups,
- weight_attr=ParamAttr(name=name + "_weights"),
- bias_attr=False)
- if name == "conv1":
- bn_name = "bn_" + name
- else:
- bn_name = "bn" + name[3:]
- self._batch_norm = BatchNorm(
- num_filters,
- act=act,
- param_attr=ParamAttr(name=bn_name + '_scale'),
- bias_attr=ParamAttr(bn_name + '_offset'),
- moving_mean_name=bn_name + '_mean',
- moving_variance_name=bn_name + '_variance')
-
- def forward(self, inputs):
- if self.is_vd_mode:
- inputs = self._pool2d_avg(inputs)
- y = self._conv(inputs)
- y = self._batch_norm(y)
- return y
-
-
-class BottleneckBlock(nn.Layer):
- def __init__(self,
- num_channels1,
- num_channels2,
- num_filters,
- stride,
- scales,
- shortcut=True,
- if_first=False,
- name=None):
- super(BottleneckBlock, self).__init__()
- self.stride = stride
- self.scales = scales
- self.conv0 = ConvBNLayer(
- num_channels=num_channels1,
- num_filters=num_filters,
- filter_size=1,
- act='relu',
- name=name + "_branch2a")
- self.conv1_list = []
- for s in range(scales - 1):
- conv1 = self.add_sublayer(
- name + '_branch2b_' + str(s + 1),
- ConvBNLayer(
- num_channels=num_filters // scales,
- num_filters=num_filters // scales,
- filter_size=3,
- stride=stride,
- act='relu',
- name=name + '_branch2b_' + str(s + 1)))
- self.conv1_list.append(conv1)
- self.pool2d_avg = AvgPool2D(kernel_size=3, stride=stride, padding=1)
-
- self.conv2 = ConvBNLayer(
- num_channels=num_filters,
- num_filters=num_channels2,
- filter_size=1,
- act=None,
- name=name + "_branch2c")
-
- if not shortcut:
- self.short = ConvBNLayer(
- num_channels=num_channels1,
- num_filters=num_channels2,
- filter_size=1,
- stride=1,
- is_vd_mode=False if if_first else True,
- name=name + "_branch1")
-
- self.shortcut = shortcut
-
- def forward(self, inputs):
- y = self.conv0(inputs)
- xs = paddle.split(y, self.scales, 1)
- ys = []
- for s, conv1 in enumerate(self.conv1_list):
- if s == 0 or self.stride == 2:
- ys.append(conv1(xs[s]))
- else:
- ys.append(conv1(xs[s] + ys[-1]))
- if self.stride == 1:
- ys.append(xs[-1])
- else:
- ys.append(self.pool2d_avg(xs[-1]))
- conv1 = paddle.concat(ys, axis=1)
- conv2 = self.conv2(conv1)
-
- if self.shortcut:
- short = inputs
- else:
- short = self.short(inputs)
- y = paddle.add(x=short, y=conv2)
- y = F.relu(y)
- return y
-```
-## 4. 模型特点
-1. 可与其他结构整合,如SENEt, ResNeXt, DLA等,从而增加准确率。
-2. 计算负载不增加,特征提取能力更强大。
-
-## 5. 模型指标
-ImageNet分类效果如下图
-
-
-
-Res2Net-50就是对标ResNet50的版本。
-
-Res2Net-50-299指的是将输入图片裁剪到299×299进行预测的Res2Net-50,因为一般都是裁剪或者resize到224×224。
-
-Res2NeXt-50为融合了ResNeXt的Res2Net-50。
-
-Res2Net-DLA-60指的是融合了DLA-60的Res2Net-50。
-
-Res2NeXt-DLA-60为融合了ResNeXt和DLA-60的Res2Net-50。
-
-SE-Res2Net-50 为融合了SENet的Res2Net-50。
-
-blRes2Net-50为融合了Big-Little Net的Res2Net-50。
-
-Res2Net-v1b-50为采取和ResNet-vd-50一样的处理方法的Res2Net-50。
-
-Res2Net-200-SSLD为Paddle使用简单的半监督标签知识蒸馏(SSLD,Simple Semi-supervised Label Distillation)的方法来提升模型效果得到的。具体详情可以见[半监督知识蒸馏](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.2/docs/zh_CN/advanced_tutorials/distillation/distillation.md)
-
-可见,Res2Net都取得了十分不错的成绩。
-
-COCO数据集效果如下图
-
-
-
-Res2Net-50的各种配置都比ResNet-50高。
-
-显著目标检测数据集指标效果如下图
-
-
-
-ECSSD、PASCAL-S、DUT-OMRON、HKU-IS都是显著目标检测任务中现在最为常用的测试集,显著目标检测任务的目的就是分割出图片中的显著物体,并用白色像素点表示,其他背景用黑色像素点表示。从图中可以看出来,使用Res2Net作为骨干网络,效果比ResNet有了很大的提升。
-
-## 6. 参考文献
-[Res2Net](https://arxiv.org/pdf/1904.01169.pdf)
-
-
-
-```python
-
-```
+# Res2Net
+
+## 1. 模型介绍
+2020年,南开大学程明明组提出了一种面向目标检测任务的新模块Res2Net。并且其论文已被TPAMI2020录用。Res2Net和ResNeXt一样,是ResNet的变体形式,只不过Res2Net不止提高了分类任务的准确率,还提高了检测任务的精度。Res2Net的新模块可以和现有其他优秀模块轻松整合,在不增加计算负载量的情况下,在ImageNet、CIFAR-100等数据集上的测试性能超过了ResNet。因为模型的残差块里又有残差连接,所以取名为Res2Net。
+
+## 2. 模型结构
+
+
+模型结构看起来很简单:将输入的特征x split为k个特征,第i+1(i = 0, 1, 2,...,k-1)个特征经过3×3卷积后,以残差连接的方式融合到第 i+2 个特征中。这就是Res2Net的主要结构。那么这样做的目的是什么呢?能够带来什么好处呢?
+答案就是多尺度卷积。多尺度特征在检测任务中一直是很重要的,自从空洞卷积提出以来,基于空洞卷积搭建的多尺度金字塔模型在检测任务上取得里程碑式的效果。不同感受野下获取的物体的信息是不同的,小的感受野可能会看到更多的物体细节,对于检测小目标也有很大的好处,而大的感受野可以感受物体的整体结构,方便网络定位物体的位置,细节与位置的结合可以更好地得到具有清晰边界的物体信息,因此,结合了多尺度金字塔的模型往往能获得很好的效果。在Res2Net中,特征k2经过3×3卷积后被送入x3所在的处理流中,k2再次被3×3的卷积优化信息,两个3×3的卷积相当于一个5×5的卷积。那么,k3就相当于融合了3×3和5×5两种感受野处理后的特征。以此类推,7×7的感受野被应用在k4中。就这样,Res2Net提取多尺度特征用于检测任务,以提高模型的准确率。在这篇论文中,s是比例尺寸的控制参数,也就是可以将输入通道数平均等分为多个特征通道。s越大表明多尺度能力越强,此外一些额外的计算开销也可以忽略。
+
+
+## 3. 模型实现
+Res2Net与ResNet的模型结构一致,主要差别在于block的搭建,因此这里用paddle框架来实现block的代码
+```python
+class ConvBNLayer(nn.Layer):
+ def __init__(
+ self,
+ num_channels,
+ num_filters,
+ filter_size,
+ stride=1,
+ groups=1,
+ is_vd_mode=False,
+ act=None,
+ name=None, ):
+ super(ConvBNLayer, self).__init__()
+
+ self.is_vd_mode = is_vd_mode
+ self._pool2d_avg = AvgPool2D(
+ kernel_size=2, stride=2, padding=0, ceil_mode=True)
+ self._conv = Conv2D(
+ in_channels=num_channels,
+ out_channels=num_filters,
+ kernel_size=filter_size,
+ stride=stride,
+ padding=(filter_size - 1) // 2,
+ groups=groups,
+ weight_attr=ParamAttr(name=name + "_weights"),
+ bias_attr=False)
+ if name == "conv1":
+ bn_name = "bn_" + name
+ else:
+ bn_name = "bn" + name[3:]
+ self._batch_norm = BatchNorm(
+ num_filters,
+ act=act,
+ param_attr=ParamAttr(name=bn_name + '_scale'),
+ bias_attr=ParamAttr(bn_name + '_offset'),
+ moving_mean_name=bn_name + '_mean',
+ moving_variance_name=bn_name + '_variance')
+
+ def forward(self, inputs):
+ if self.is_vd_mode:
+ inputs = self._pool2d_avg(inputs)
+ y = self._conv(inputs)
+ y = self._batch_norm(y)
+ return y
+
+
+class BottleneckBlock(nn.Layer):
+ def __init__(self,
+ num_channels1,
+ num_channels2,
+ num_filters,
+ stride,
+ scales,
+ shortcut=True,
+ if_first=False,
+ name=None):
+ super(BottleneckBlock, self).__init__()
+ self.stride = stride
+ self.scales = scales
+ self.conv0 = ConvBNLayer(
+ num_channels=num_channels1,
+ num_filters=num_filters,
+ filter_size=1,
+ act='relu',
+ name=name + "_branch2a")
+ self.conv1_list = []
+ for s in range(scales - 1):
+ conv1 = self.add_sublayer(
+ name + '_branch2b_' + str(s + 1),
+ ConvBNLayer(
+ num_channels=num_filters // scales,
+ num_filters=num_filters // scales,
+ filter_size=3,
+ stride=stride,
+ act='relu',
+ name=name + '_branch2b_' + str(s + 1)))
+ self.conv1_list.append(conv1)
+ self.pool2d_avg = AvgPool2D(kernel_size=3, stride=stride, padding=1)
+
+ self.conv2 = ConvBNLayer(
+ num_channels=num_filters,
+ num_filters=num_channels2,
+ filter_size=1,
+ act=None,
+ name=name + "_branch2c")
+
+ if not shortcut:
+ self.short = ConvBNLayer(
+ num_channels=num_channels1,
+ num_filters=num_channels2,
+ filter_size=1,
+ stride=1,
+ is_vd_mode=False if if_first else True,
+ name=name + "_branch1")
+
+ self.shortcut = shortcut
+
+ def forward(self, inputs):
+ y = self.conv0(inputs)
+ xs = paddle.split(y, self.scales, 1)
+ ys = []
+ for s, conv1 in enumerate(self.conv1_list):
+ if s == 0 or self.stride == 2:
+ ys.append(conv1(xs[s]))
+ else:
+ ys.append(conv1(xs[s] + ys[-1]))
+ if self.stride == 1:
+ ys.append(xs[-1])
+ else:
+ ys.append(self.pool2d_avg(xs[-1]))
+ conv1 = paddle.concat(ys, axis=1)
+ conv2 = self.conv2(conv1)
+
+ if self.shortcut:
+ short = inputs
+ else:
+ short = self.short(inputs)
+ y = paddle.add(x=short, y=conv2)
+ y = F.relu(y)
+ return y
+```
+## 4. 模型特点
+1. 可与其他结构整合,如SENet、ResNeXt、DLA等,从而进一步提高准确率。
+2. 计算负载不增加,特征提取能力更强大。
+
+## 5. 模型指标
+ImageNet分类效果如下图
+
+
+
+Res2Net-50就是对标ResNet50的版本。
+
+Res2Net-50-299指的是将输入图片裁剪到299×299进行预测的Res2Net-50,因为一般都是裁剪或者resize到224×224。
+
+Res2NeXt-50为融合了ResNeXt的Res2Net-50。
+
+Res2Net-DLA-60指的是融合了DLA-60的Res2Net-50。
+
+Res2NeXt-DLA-60为融合了ResNeXt和DLA-60的Res2Net-50。
+
+SE-Res2Net-50 为融合了SENet的Res2Net-50。
+
+blRes2Net-50为融合了Big-Little Net的Res2Net-50。
+
+Res2Net-v1b-50为采取和ResNet-vd-50一样的处理方法的Res2Net-50。
+
+Res2Net-200-SSLD为Paddle使用简单的半监督标签知识蒸馏(SSLD,Simple Semi-supervised Label Distillation)的方法来提升模型效果得到的。具体详情可以见[半监督知识蒸馏](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.2/docs/zh_CN/advanced_tutorials/distillation/distillation.md)
+
+可见,Res2Net都取得了十分不错的成绩。
+
+COCO数据集效果如下图
+
+
+
+Res2Net-50的各种配置都比ResNet-50高。
+
+显著目标检测数据集指标效果如下图
+
+
+
+ECSSD、PASCAL-S、DUT-OMRON、HKU-IS都是显著目标检测任务中现在最为常用的测试集,显著目标检测任务的目的就是分割出图片中的显著物体,并用白色像素点表示,其他背景用黑色像素点表示。从图中可以看出来,使用Res2Net作为骨干网络,效果比ResNet有了很大的提升。
+
+## 6. 参考文献
+[Res2Net](https://arxiv.org/pdf/1904.01169.pdf)
+
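
A hypothetical smoke test helps pin down the shapes of the block defined above (the imports are the ones the class listing relies on; the constructor arguments here are illustrative, not taken from the original file). With num_filters=128 and scales=4, the middle 128 channels split into four 32-channel groups, three of which pass through the hierarchical 3x3 convs:

```python
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn import AvgPool2D, BatchNorm, Conv2D

# assumes ConvBNLayer and BottleneckBlock from the listing above are in scope
block = BottleneckBlock(
    num_channels1=64, num_channels2=256, num_filters=128,
    stride=1, scales=4, shortcut=False, if_first=True, name="res2a")

x = paddle.rand([1, 64, 56, 56])
print(block(x).shape)  # [1, 256, 56, 56]: 1x1 expand, split-conv-concat, 1x1 project
```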
diff --git a/docs/tutorials/computer_vision/classification/ResNeXt.md b/docs/tutorials/computer_vision/classification/ResNeXt.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/classification/ResNet.md b/docs/tutorials/computer_vision/classification/ResNet.md
old mode 100644
new mode 100755
index 7125e8dd0..f23f98f75
--- a/docs/tutorials/computer_vision/classification/ResNet.md
+++ b/docs/tutorials/computer_vision/classification/ResNet.md
@@ -81,4 +81,3 @@ Sigmoid 函数的导数 $\sigma^{'}(x)$ 如 **图3** 所示:
[1] [Visualizing and Understanding Convolutional Networks](https://arxiv.org/pdf/1311.2901.pdf)
[2] [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385.pdf)
-
diff --git a/docs/tutorials/computer_vision/classification/Swin Transformer.md b/docs/tutorials/computer_vision/classification/Swin Transformer.md
old mode 100644
new mode 100755
index 092c7ee58..38a040f1d
--- a/docs/tutorials/computer_vision/classification/Swin Transformer.md
+++ b/docs/tutorials/computer_vision/classification/Swin Transformer.md
@@ -57,8 +57,8 @@ class PatchEmbed(nn.Layer):
def forward(self, x):
B, C, H, W = x.shape
-
- x = self.proj(x) # B, 96, H/4, W4
+
+ x = self.proj(x) # B, 96, H/4, W4
x = x.flatten(2).transpose([0, 2, 1]) # B Ph*Pw 96
if self.norm is not None:
@@ -85,7 +85,7 @@ class PatchMerging(nn.Layer):
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias_attr=False)
self.norm = norm_layer(4 * dim)
-
+
def forward(self, x):
"""
x: B, H*W, C
@@ -104,7 +104,7 @@ class PatchMerging(nn.Layer):
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
# 拼接在一起作为一整个张量,展开。通道维度会变成原先的4倍(因为H,W各缩小2倍)
x = paddle.concat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
- x = x.reshape([B, H * W // 4, 4 * C]) # B H/2*W/2 4*C
+ x = x.reshape([B, H * W // 4, 4 * C]) # B H/2*W/2 4*C
x = self.norm(x)
# 通过一个全连接层再调整通道维度为原来的两倍
diff --git a/docs/tutorials/computer_vision/classification/VGG.md b/docs/tutorials/computer_vision/classification/VGG.md
old mode 100644
new mode 100755
index 8ab5256a1..56a4122fb
--- a/docs/tutorials/computer_vision/classification/VGG.md
+++ b/docs/tutorials/computer_vision/classification/VGG.md
@@ -120,4 +120,3 @@ VGG 在 2014 年的 ImageNet 比赛上取得了亚军的好成绩,具体指标
## 参考文献
[1] [Very deep convolutional networks for large-scale image recognition.](https://arxiv.org/pdf/1409.1556.pdf)
-
diff --git a/docs/tutorials/computer_vision/classification/ViT.md b/docs/tutorials/computer_vision/classification/ViT.md
old mode 100644
new mode 100755
index 7bd5b2399..2af5ffe76
--- a/docs/tutorials/computer_vision/classification/ViT.md
+++ b/docs/tutorials/computer_vision/classification/ViT.md
@@ -388,7 +388,7 @@ class VisionTransformer(nn.Layer):
elif isinstance(m, nn.LayerNorm):
zeros_(m.bias)
ones_(m.weight)
-
+
def forward_features(self, x):
B = paddle.shape(x)[0]
# 将图片分块,并调整每个块向量的维度
@@ -430,4 +430,4 @@ ViT模型在常用数据集上进行迁移学习,最终指标如 **图10** 所
## 参考文献
-[1] [An Image is Worth 16x16 Words:Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929)
\ No newline at end of file
+[1] [An Image is Worth 16x16 Words:Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929)
diff --git a/docs/tutorials/computer_vision/classification/index.rst b/docs/tutorials/computer_vision/classification/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/image_augmentation/ImageAugment.md b/docs/tutorials/computer_vision/image_augmentation/ImageAugment.md
old mode 100644
new mode 100755
index ffa1f377e..ee92f21a8
--- a/docs/tutorials/computer_vision/image_augmentation/ImageAugment.md
+++ b/docs/tutorials/computer_vision/image_augmentation/ImageAugment.md
@@ -254,4 +254,4 @@ Mixup 是最先提出的图像混叠增广方案,其原理简单、方便实
[7] [mixup: Beyond empirical risk minimization](https://arxiv.org/pdf/1710.09412.pdf)
-[8] [Cutmix: Regularization strategy to train strong classifiers with localizable features](https://arxiv.org/pdf/1905.04899v2.pdf))
\ No newline at end of file
+[8] [Cutmix: Regularization strategy to train strong classifiers with localizable features](https://arxiv.org/pdf/1905.04899v2.pdf))
diff --git a/docs/tutorials/computer_vision/image_augmentation/index.rst b/docs/tutorials/computer_vision/image_augmentation/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/image_augmentation/tta.md b/docs/tutorials/computer_vision/image_augmentation/tta.md
old mode 100644
new mode 100755
index f46d6d60e..293fe1337
--- a/docs/tutorials/computer_vision/image_augmentation/tta.md
+++ b/docs/tutorials/computer_vision/image_augmentation/tta.md
@@ -15,5 +15,3 @@
## 2. 测试时增强的作用
通过分析多项数据增强的图像,然后综合分析,有可能会平滑掉某一种变换导致的关键信息丢失现象带来的损失,从而提升预测的准确率。
-
-
diff --git a/docs/tutorials/computer_vision/index.rst b/docs/tutorials/computer_vision/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/object_detection/Bounding_Box_Anchor.md b/docs/tutorials/computer_vision/object_detection/Bounding_Box_Anchor.md
old mode 100644
new mode 100755
index 4dabf0a54..42a58021c
--- a/docs/tutorials/computer_vision/object_detection/Bounding_Box_Anchor.md
+++ b/docs/tutorials/computer_vision/object_detection/Bounding_Box_Anchor.md
@@ -64,7 +64,7 @@ import matplotlib.patches as patches
from matplotlib.image import imread
import math
-# 定义画矩形框的程序
+# 定义画矩形框的程序
def draw_rectangle(currentAxis, bbox, edgecolor = 'k', facecolor = 'y', fill=False, linestyle='-'):
# currentAxis,坐标轴,通过plt.gca()获取
# bbox,边界框,包含四个数值的list, [x1, y1, x2, y2]
@@ -79,7 +79,7 @@ def draw_rectangle(currentAxis, bbox, edgecolor = 'k', facecolor = 'y', fill=Fal
edgecolor=edgecolor,facecolor=facecolor,fill=fill, linestyle=linestyle)
currentAxis.add_patch(rect)
-
+
plt.figure(figsize=(10, 10))
# 传入图片路径
filename = '/home/aistudio/work/images/section3/000000086956.jpg'
@@ -110,7 +110,7 @@ def draw_anchor_box(center, length, scales, ratios, img_height, img_width):
for scale in scales:
for ratio in ratios:
h = length*scale*math.sqrt(ratio)
- w = length*scale/math.sqrt(ratio)
+ w = length*scale/math.sqrt(ratio)
x1 = max(center[0] - w/2., 0.)
y1 = max(center[1] - h/2., 0.)
x2 = min(center[0] + w/2. - 1.0, img_width - 1.0)
@@ -122,7 +122,7 @@ def draw_anchor_box(center, length, scales, ratios, img_height, img_width):
draw_rectangle(currentAxis, bbox, edgecolor = 'b')
img_height = im.shape[0]
-img_width = im.shape[1]
+img_width = im.shape[1]
# 绘制锚框
draw_anchor_box([300., 500.], 100., [2.0], [0.5, 1.0, 2.0], img_height, img_width)
@@ -153,4 +153,3 @@ plt.show()
[2] [YOLO9000: Better, Faster, Stronger](https://arxiv.org/pdf/1612.08242v1.pdf)
[3] [You Only Look Once: Unified, Real-Time Object Detection](https://arxiv.org/pdf/1506.02640.pdf)
-
diff --git a/docs/tutorials/computer_vision/object_detection/Detection.md b/docs/tutorials/computer_vision/object_detection/Detection.md
old mode 100644
new mode 100755
index bca0a8ad8..1d026f901
--- a/docs/tutorials/computer_vision/object_detection/Detection.md
+++ b/docs/tutorials/computer_vision/object_detection/Detection.md
@@ -6,7 +6,7 @@
图1 图像分类和目标检测示意图
-* 图1(a)是图像分类任务,只需对这张图片进行类别识别。
+* 图1(a)是图像分类任务,只需对这张图片进行类别识别。
* 图1(b)是目标检测任务,不仅要识别出这一张图片中的类别为斑马,还要标出图中斑马的位置。
## 应用场景
@@ -112,13 +112,13 @@ Anchor-base和Anchor-free的算法也各具优势,下表为大家简单对比
## 参考文献
-[1] [Rich feature hierarchies for accurate object detection and semantic segmentation](https://arxiv.org/abs/1311.2524)
+[1] [Rich feature hierarchies for accurate object detection and semantic segmentation](https://arxiv.org/abs/1311.2524)
-[2] [Fast R-CNN](https://arxiv.org/abs/1504.08083)
+[2] [Fast R-CNN](https://arxiv.org/abs/1504.08083)
-[3] [Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks](https://arxiv.org/abs/1506.01497)
+[3] [Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks](https://arxiv.org/abs/1506.01497)
-[4] [Mask R-CNN](https://arxiv.org/abs/1703.06870)
+[4] [Mask R-CNN](https://arxiv.org/abs/1703.06870)
[5] [SSD: Single Shot MultiBox Detector](https://arxiv.org/abs/1512.02325)
@@ -143,4 +143,3 @@ Anchor-base和Anchor-free的算法也各具优势,下表为大家简单对比
[15] [Object365](http://www.objects365.org/download.html)
[16] [OpenImages](http://storage.googleapis.com/openimages/web/download.html)
-
diff --git a/docs/tutorials/computer_vision/object_detection/IOU.md b/docs/tutorials/computer_vision/object_detection/IOU.md
old mode 100644
new mode 100755
index 27d360901..60bf26bb8
--- a/docs/tutorials/computer_vision/object_detection/IOU.md
+++ b/docs/tutorials/computer_vision/object_detection/IOU.md
@@ -70,7 +70,7 @@ def box_iou_xyxy(box1, box2):
x2min, y2min, x2max, y2max = box2[0], box2[1], box2[2], box2[3]
# 计算box2的面积
s2 = (y2max - y2min + 1.) * (x2max - x2min + 1.)
-
+
# 计算相交矩形框的坐标
xmin = np.maximum(x1min, x2min)
ymin = np.maximum(y1min, y2min)
@@ -136,11 +136,10 @@ print('IoU is {}'.format(iou))
**问题:**
-1. 什么情况下两个矩形框的IoU等于1?
+1. 什么情况下两个矩形框的IoU等于1?
答案:两个矩形框完全重合。
1. 什么情况下两个矩形框的IoU等于0?
答案:两个矩形框完全不相交。
-
diff --git a/docs/tutorials/computer_vision/object_detection/NMS.md b/docs/tutorials/computer_vision/object_detection/NMS.md
old mode 100644
new mode 100755
index c1ef3961b..b4e7f447c
--- a/docs/tutorials/computer_vision/object_detection/NMS.md
+++ b/docs/tutorials/computer_vision/object_detection/NMS.md
@@ -19,7 +19,7 @@ import matplotlib.patches as patches
from matplotlib.image import imread
import math
-# 定义画矩形框的程序
+# 定义画矩形框的程序
def draw_rectangle(currentAxis, bbox, edgecolor = 'k', facecolor = 'y', fill=False, linestyle='-'):
# currentAxis,坐标轴,通过plt.gca()获取
# bbox,边界框,包含四个数值的list, [x1, y1, x2, y2]
@@ -27,14 +27,14 @@ def draw_rectangle(currentAxis, bbox, edgecolor = 'k', facecolor = 'y', fill=Fal
# facecolor,填充颜色
# fill, 是否填充
# linestype,边框线型
-
+
# patches.Rectangle(xy, width, height,linewidth,edgecolor,facecolor,fill, linestyle)
# xy:左下角坐标; width:矩形框的宽; height:矩形框的高; linewidth:线宽; edgecolor:边界颜色; facecolor:填充颜色; fill:是否填充; linestyle:线断类型
rect=patches.Rectangle((bbox[0], bbox[1]), bbox[2]-bbox[0]+1, bbox[3]-bbox[1]+1, linewidth=1,
edgecolor=edgecolor,facecolor=facecolor,fill=fill, linestyle=linestyle)
currentAxis.add_patch(rect)
-
+
plt.figure(figsize=(10, 10))
# 传入图片路径
filename = '/home/aistudio/work/images/section3/000000086956.jpg'
@@ -74,7 +74,7 @@ for box in boxes:
比如在上面的程序中,boxes里面一共对应11个预测框,scores给出了它们预测"人"这一类别的得分,NMS的具体做法如下。
- Step0:创建选中列表,keep_list = []
-- Step1:对得分进行排序,remain_list = [ 3, 5, 10, 2, 9, 0, 1, 6, 4, 7, 8],
+- Step1:对得分进行排序,remain_list = [ 3, 5, 10, 2, 9, 0, 1, 6, 4, 7, 8],
- Step2:选出boxes[3],此时keep_list为空,不需要计算IoU,直接将其放入keep_list,keep_list = [3], remain_list=[5, 10, 2, 9, 0, 1, 6, 4, 7, 8]
- Step3:选出boxes[5],此时keep_list中已经存在boxes[3],计算出IoU(boxes[3], boxes[5]) = 0.0,显然小于阈值,则keep_list=[3, 5], remain_list = [10, 2, 9, 0, 1, 6, 4, 7, 8]
- Step4:选出boxes[10],此时keep_list=[3, 5],计算IoU(boxes[3], boxes[10])=0.0268,IoU(boxes[5], boxes[10])=0.0268 = 0.24,都小于阈值,则keep_list = [3, 5, 10],remain_list=[2, 9, 0, 1, 6, 4, 7, 8]
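
The walkthrough above is exactly a greedy loop. A minimal sketch of that loop, assuming the `box_iou_xyxy` helper from the IoU section and boxes already filtered by a score threshold:

```python
import numpy as np

def nms(boxes, scores, iou_thresh=0.5):
    """Greedy NMS: keep the best remaining box unless it overlaps a kept one."""
    remain = list(np.argsort(scores)[::-1])  # indices sorted by score, descending
    keep = []
    while remain:
        i = remain.pop(0)                    # highest-scoring remaining box
        if all(box_iou_xyxy(boxes[i], boxes[j]) < iou_thresh for j in keep):
            keep.append(i)
    return keep
```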
@@ -133,7 +133,7 @@ import matplotlib.patches as patches
from matplotlib.image import imread
import math
-# 定义画矩形框的程序
+# 定义画矩形框的程序
def draw_rectangle(currentAxis, bbox, edgecolor = 'k', facecolor = 'y', fill=False, linestyle='-'):
# currentAxis,坐标轴,通过plt.gca()获取
# bbox,边界框,包含四个数值的list, [x1, y1, x2, y2]
@@ -146,7 +146,7 @@ def draw_rectangle(currentAxis, bbox, edgecolor = 'k', facecolor = 'y', fill=Fal
edgecolor=edgecolor,facecolor=facecolor,fill=fill, linestyle=linestyle)
currentAxis.add_patch(rect)
-
+
plt.figure(figsize=(10, 10))
filename = '/home/aistudio/work/images/section3/000000086956.jpg'
@@ -166,7 +166,7 @@ boxes = np.array([[4.21716537e+01, 1.28230896e+02, 2.26547668e+02, 6.00434631e+0
[2.17988785e+02, 3.02472412e+02, 4.06062927e+02, 6.29106628e+02],
[2.00241089e+02, 3.23755096e+02, 3.96929321e+02, 6.36386108e+02],
[2.14310303e+02, 3.23443665e+02, 4.06732849e+02, 6.35775269e+02]])
-
+
scores = np.array([0.5247661 , 0.51759845, 0.86075854, 0.9910175 , 0.39170712,
0.9297706 , 0.5115228 , 0.270992 , 0.19087596, 0.64201415, 0.879036])
@@ -228,4 +228,4 @@ def multiclass_nms(bboxes, scores, score_thresh=0.01, nms_thresh=0.45, pre_nms_t
rets.append(ret_i)
return rets
-```
\ No newline at end of file
+```
diff --git a/docs/tutorials/computer_vision/object_detection/SoftNMS.md b/docs/tutorials/computer_vision/object_detection/SoftNMS.md
old mode 100644
new mode 100755
index da41a0b22..7f4b5f485
--- a/docs/tutorials/computer_vision/object_detection/SoftNMS.md
+++ b/docs/tutorials/computer_vision/object_detection/SoftNMS.md
@@ -13,7 +13,7 @@ NMS(非极大值抑制)方法是目标检测任务中常用的后处理方
$$
s_i = \{\begin{matrix}
- s_i,iou(M,b_i)<N_t
图24 CamVid数据集示例
-#### 3.1.9. **KITTI**
+#### 3.1.9. **KITTI**
该数据集是用于移动机器人及自动驾驶研究的最受欢迎的数据集之一,包含了由多种形式的传感器得出的数小时的交通场景数据,包括高分辨率RGB、灰度立体摄像机以及三维激光扫描器。尽管很受欢迎,该数据集本身并没有包含真实语义分割标注,但是,众多的研究者手工地为该数据集的部分数据添加标注以满足其问题的需求。有人为道路检测竞赛中的323张图片生成了真实标注,包含三个类别:道路、垂直面和天空。还有人标注了252张图片,其中140张训练、112张测试,其选自追踪竞赛中的RGB和Velodyne扫描数据,共十个类。有学者在视觉测距数据集中标注了170个训练图片和46个测试图片,共11个类。
@@ -502,4 +502,3 @@ Deeplab v3在原有基础上的改动是:
[8] [A survey on indoor RGB-D semantic segmentation: from hand-crafted features to deep convolutional neural networks](https://link.springer.com/article/10.1007%2Fs11042-019-7684-3)
[9] [A Review on Deep Learning Techniques Applied to Semantic Segmentation](https://arxiv.org/abs/1704.06857)
-
diff --git a/docs/tutorials/computer_vision/semantic_segmentation/Overview/index.rst b/docs/tutorials/computer_vision/semantic_segmentation/Overview/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/semantic_segmentation/index.rst b/docs/tutorials/computer_vision/semantic_segmentation/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/computer_vision/video_understanding/TSM.md b/docs/tutorials/computer_vision/video_understanding/TSM.md
old mode 100644
new mode 100755
index 39039fb46..189c4aa10
--- a/docs/tutorials/computer_vision/video_understanding/TSM.md
+++ b/docs/tutorials/computer_vision/video_understanding/TSM.md
@@ -48,7 +48,7 @@ Temporal Shift Module(TSM) 如 **图1** 所示,在 **图1 a** 中,作者描
图2 不同比例的通道位移下延迟与准确率对比
-2. **保持空间特征学习能力。** 一种简单的TSM使用方法是将其直接插入到每个卷基层或残差模块前,如 **图3 a** 所示,这种方法被称为 in-place shift,但是它会损失主干模型的空间特征学习能力,尤其当我们移动大量通道时,存储在通道中的当前帧信息会随着通道移动而丢失。为解决这个问题,作者提出了另一种方法,即将TSM放在残差模块的残差分支中,这种方法被称为 residual TSM,如 **图3 b** 所示,它可以解决退化的空间特征学习问题,因为原始的激活信息在时间转移后仍可通过identity映射访问。
+2. **保持空间特征学习能力。** 一种简单的TSM使用方法是将其直接插入到每个卷基层或残差模块前,如 **图3 a** 所示,这种方法被称为 in-place shift,但是它会损失主干模型的空间特征学习能力,尤其当我们移动大量通道时,存储在通道中的当前帧信息会随着通道移动而丢失。为解决这个问题,作者提出了另一种方法,即将TSM放在残差模块的残差分支中,这种方法被称为 residual TSM,如 **图3 b** 所示,它可以解决退化的空间特征学习问题,因为原始的激活信息在时间转移后仍可通过identity映射访问。

@@ -58,7 +58,7 @@ Temporal Shift Module(TSM) 如 **图1** 所示,在 **图1 a** 中,作者描
### 2.4 TSM 视频网络
-**Offline Models with Bi-directional TSM**
+**Offline Models with Bi-directional TSM**
作者使用双向TSM来构建离线视频识别模型。给定视频 V,首先从视频中采样T帧 $F_1, ..., F_T$。帧采样后,2D CNN单独处理每个帧,并对输出logits求平均值以给出最终预测。我们为每个残差模块插入了TSM,无需计算即可实现时间信息融合。在论文中,作者使用ResNet50作为网络主干。
@@ -86,5 +86,4 @@ Temporal Shift Module(TSM) 如 **图1** 所示,在 **图1 a** 中,作者描
在第二部分中,TSM与高效视频理解框架ECO进行对比。ECO使用早期2D + 晚期3D的结构,可实现中级时间融合。与ECO相比,TSM在较小的FLOP上获得了更好的性能。
-第三部分包含当前的最新方法: Non-local I3D + GCN,可实现所有级别的时间融合。但由于GCN需要使用一个在MSCOCO对象检测数据集上训练的地区提议网络来生成边界框,这引入了额外的数据和训练成本,因此不能公平的进行比较。只将TSM与它的CNN部分(Non-local I3D)比较的话,TSM在验证集上的FLOP减小了10倍,精度提升1.2%。
-
+第三部分包含当前的最新方法: Non-local I3D + GCN,可实现所有级别的时间融合。但由于GCN需要使用一个在MSCOCO对象检测数据集上训练的地区提议网络来生成边界框,这引入了额外的数据和训练成本,因此不能公平的进行比较。只将TSM与它的CNN部分(Non-local I3D)比较的话,TSM在验证集上的FLOP减小了10倍,精度提升1.2%。
diff --git a/docs/tutorials/computer_vision/video_understanding/TimeSformer.md b/docs/tutorials/computer_vision/video_understanding/TimeSformer.md
old mode 100644
new mode 100755
index f4f28b8c0..0ac3cf814
--- a/docs/tutorials/computer_vision/video_understanding/TimeSformer.md
+++ b/docs/tutorials/computer_vision/video_understanding/TimeSformer.md
@@ -122,4 +122,3 @@ $$
[2] Huang, Z., Wang, X., Huang, L., Huang, C., Wei, Y., and Liu, W. Ccnet: Criss-cross attention for semantic seg- mentation. 2019. https://openaccess.thecvf.com/content_ICCV_2019/papers/Huang_CCNet_Criss-Cross_Attention_for_Semantic_Segmentation_ICCV_2019_paper.pdf
[3] Wang, H., Zhu, Y., Green, B., Adam, H., Yuille, A. L., and Chen, L. Axial-deeplab: Stand-alone axial-attention for panoptic segmentation. In *Computer Vision - ECCV 2020 - 16th European Conference*, 2020b. https://link.springer.com/chapter/10.1007/978-3-030-58548-8_7
-
diff --git a/docs/tutorials/computer_vision/video_understanding/index.rst b/docs/tutorials/computer_vision/video_understanding/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/activation_functions/Activation_Function.md b/docs/tutorials/deep_learning/activation_functions/Activation_Function.md
old mode 100644
new mode 100755
index 25c18021e..e7c92c2cb
--- a/docs/tutorials/deep_learning/activation_functions/Activation_Function.md
+++ b/docs/tutorials/deep_learning/activation_functions/Activation_Function.md
@@ -387,4 +387,3 @@ $$s^{'}\left( x \right) =s\left( x \right) \left( 1-s\left( x \right) \right) \i
2. $softmax$ 是 $sigmoid$ 的扩展,因为,当类别数 $k=2$ 时,$softmax$ 回归退化为 $logistic$ 回归。
3. $softmax$ 建模使用的分布是多项式分布,而 $logistic$ 则基于伯努利分布。
4. 多个 $logistic$ 回归通过叠加也同样可以实现多分类的效果,但是 $softmax$ 回归进行的多分类,类与类之间是互斥的,即一个输入只能被归为一类;多 $logistic$ 回归进行多分类,输出的类别并不是互斥的,即"苹果"这个词语既属于"水果"类也属于"$3C$"类别。
-
diff --git a/docs/tutorials/deep_learning/activation_functions/index.rst b/docs/tutorials/deep_learning/activation_functions/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/basic_concepts/index.rst b/docs/tutorials/deep_learning/basic_concepts/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/basic_concepts/multilayer_perceptron.md b/docs/tutorials/deep_learning/basic_concepts/multilayer_perceptron.md
old mode 100644
new mode 100755
index c699bfa4d..ca9a0a92d
--- a/docs/tutorials/deep_learning/basic_concepts/multilayer_perceptron.md
+++ b/docs/tutorials/deep_learning/basic_concepts/multilayer_perceptron.md
@@ -9,4 +9,3 @@
图1 多层感知机模型
在多层感知机中,相邻层所包含的神经元之间通常使用“全连接”方式进行连接。所谓“全连接”是指两个相邻层之间的神经元相互成对连接,但同一层内神经元之间没有连接。多层感知机可以模拟复杂非线性函数功能,所模拟函数的复杂性取决于网络隐藏层数目和各层中神经元数目。
-
diff --git a/docs/tutorials/deep_learning/basic_concepts/neuron.md b/docs/tutorials/deep_learning/basic_concepts/neuron.md
old mode 100644
new mode 100755
index 5a5a6d9db..3ef3e0be2
--- a/docs/tutorials/deep_learning/basic_concepts/neuron.md
+++ b/docs/tutorials/deep_learning/basic_concepts/neuron.md
@@ -17,6 +17,3 @@ $$
$)大于阈值 $\theta$,则函数 $\varPhi \left( \right)$ 的输出为1、否则为0。也就是说,如果线性加权累加结果大于阈值 $\theta$,则神经元细胞处于兴奋状态,向后传递 1 的信息,否则该神经元细胞处于抑制状态而不向后传递信息。
从另外一个角度来看,对于任何输入数据 $x_i$ ($1\le i\le n$),MCP 模型可得到 1 或 0 这样的输出结果,实现了将输入数据分类到 1 或 0 两个类别中,解决了二分类问题。
-
-
-
diff --git a/docs/tutorials/deep_learning/basic_concepts/single_layer_perceptron.md b/docs/tutorials/deep_learning/basic_concepts/single_layer_perceptron.md
old mode 100644
new mode 100755
index 2147960ab..51019d1b3
--- a/docs/tutorials/deep_learning/basic_concepts/single_layer_perceptron.md
+++ b/docs/tutorials/deep_learning/basic_concepts/single_layer_perceptron.md
@@ -17,7 +17,7 @@ $$
$$
-sign(x) =
+sign(x) =
\begin{cases}
+1 \qquad & x \geq 0 \\
-1 \qquad & x \lt 0
@@ -38,12 +38,12 @@ $$
3. 将训练样本输入到单层感知机中,根据模型公式,得到实际输出 $y$ ;
4. 根据如下公式更新权重系数;
-
-
+
+
$$
w^{m+1} = w^m + \eta[d^m - y^m]x^m
$$
-
+
5. 当满足收敛条件时,算法结束;若不满足收敛条件则输入下一条样本继续训练,即 $m = m +1$。通常收敛条件可为:
* 误差小于某个预先设定的较小值 $\epsilon$ ;
@@ -59,4 +59,3 @@ $$
图2 单层感知机模拟不同逻辑函数功能的示意图
单层感知机可被用来区分线性可分数据。在 **图2** 中,逻辑与(AND)、逻辑与非(NAND)和逻辑或(OR)为线性可分函数,所以可利用单层感知机来模拟这些逻辑函数。但是,由于逻辑异或(XOR)是非线性可分的逻辑函数,因此**单层感知机无法模拟逻辑异或函数的功能**。
-
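
To make the update rule concrete, here is a tiny NumPy sketch that trains a single-layer perceptron on the linearly separable AND function; the learning rate and epoch count are arbitrary illustrative choices:

```python
import numpy as np

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
d = np.array([-1, -1, -1, 1])            # AND with {-1, +1} labels

w, b, eta = np.zeros(2), 0.0, 0.1
for _ in range(10):                      # a few epochs suffice here
    for x, target in zip(X, d):
        y = 1 if w @ x + b >= 0 else -1  # sign activation
        w += eta * (target - y) * x      # w_{m+1} = w_m + eta (d - y) x
        b += eta * (target - y)
print([1 if w @ x + b >= 0 else -1 for x in X])  # [-1, -1, -1, 1]
```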
diff --git a/docs/tutorials/deep_learning/distances/distances.md b/docs/tutorials/deep_learning/distances/distances.md
old mode 100644
new mode 100755
index deccfb47d..12fc377f7
--- a/docs/tutorials/deep_learning/distances/distances.md
+++ b/docs/tutorials/deep_learning/distances/distances.md
@@ -80,4 +80,3 @@ $$
$$
Jaccard(X,Y)=\frac{X\cup Y}{X\cap Y}
$$
-
diff --git a/docs/tutorials/deep_learning/distances/index.rst b/docs/tutorials/deep_learning/distances/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/index.rst b/docs/tutorials/deep_learning/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/loss_functions/Balanced L1 Loss.md b/docs/tutorials/deep_learning/loss_functions/Balanced L1 Loss.md
old mode 100644
new mode 100755
index e36457881..33fa2a86c
--- a/docs/tutorials/deep_learning/loss_functions/Balanced L1 Loss.md
+++ b/docs/tutorials/deep_learning/loss_functions/Balanced L1 Loss.md
@@ -34,4 +34,4 @@ $$ \alpha ln(b|x|+1) = \gamma$$
默认参数设置:α = 0.5,γ=1.5
-延伸阅读:[Libra R-CNN: Towards Balanced Learning for Object Detection](https://arxiv.org/pdf/1904.02701.pdf)
\ No newline at end of file
+延伸阅读:[Libra R-CNN: Towards Balanced Learning for Object Detection](https://arxiv.org/pdf/1904.02701.pdf)
diff --git a/docs/tutorials/deep_learning/loss_functions/CE_Loss.md b/docs/tutorials/deep_learning/loss_functions/CE_Loss.md
old mode 100644
new mode 100755
index 510bd170f..538844a05
--- a/docs/tutorials/deep_learning/loss_functions/CE_Loss.md
+++ b/docs/tutorials/deep_learning/loss_functions/CE_Loss.md
@@ -27,18 +27,18 @@ $$
**图1** 给出了一个三个类别分类的例子。由于输入数据 $x$ 属于类别 $1$,因此其实际类别概率分布值为 $y=(y_1,y_2,y_3)=(1,0,0)$。经过神经网络的变换,得到了输入数据 $x$ 相对于三个类别的预测中间值 $(z1,z2,z3)$。然后,经过 $Softmax$ 函数映射,得到神经网络所预测的输入数据 $x$ 的类别分布概率 $\hat{y}=\left( \hat{y}_1,\hat{y}_2,\hat{y}_3 \right)$。根据前面的介绍,$\hat{y}_1$、$\hat{y}_2$ 和 $\hat{y}_3$ 为 $(0,1)$ 范围之间的一个概率值。由于样本 $x$ 属于第一个类别,因此希望神经网络所预测得到的 $\hat{y}_1$取值要远远大于 $\hat{y}_2$ 和 $\hat{y}_3$ 的取值。为了得到这样的神经网络,在训练中可利用如下交叉熵损失函数来对模型参数进行优化:
$$
-cross\ entropy=-\left( y_1\times \log \left( \hat{y}_1 \right) +y_2\times \log \left( \hat{y}_2 \right) +y_3\times \log \left( \hat{y}_3 \right) \right)
+cross\ entropy=-\left( y_1\times \log \left( \hat{y}_1 \right) +y_2\times \log \left( \hat{y}_2 \right) +y_3\times \log \left( \hat{y}_3 \right) \right)
$$
在上式中,$y_2$ 和 $y_3$ 均为 $0$、$y_1$ 为 $1$,因此交叉熵损失函数简化为:
$$
--y_1\times \log \left( \hat{y}_1 \right) =-\log \left( \hat{y}_1 \right)
+-y_1\times \log \left( \hat{y}_1 \right) =-\log \left( \hat{y}_1 \right)
$$
在神经网络训练中,要将输入数据实际的类别概率分布与模型预测的类别概率分布之间的误差(即损失)从输出端向输入端传递,以便来优化模型参数。下面简单介绍根据交叉熵计算得到的误差从 $\hat{y}_1$ 传递给 $z_1$ 和 $z_2$($z_3$ 的推导与 $z_2$ 相同)的情况。
$$
-\frac{\partial \hat{y}_1}{\partial z_1}=\frac{\partial \left( \frac{e^{z_1}}{\sum_k{e^{z_k}}} \right)}{\partial z_1}=\frac{\left( e^{z_1} \right) ^{'}\times \sum_k{e^{z_k}-e^{z_1}\times e^{z_1}}}{\left( \sum_k{e^{z_k}} \right) ^2}=\frac{e^{z_1}}{\sum_k{e^{z_k}}}-\frac{e^{z_1}}{\sum_k{e^{z_k}}}\times \frac{e^{z_1}}{\sum_k{e^{z_k}}}=\hat{y}_1\left( 1-\hat{y}_1 \right)
+\frac{\partial \hat{y}_1}{\partial z_1}=\frac{\partial \left( \frac{e^{z_1}}{\sum_k{e^{z_k}}} \right)}{\partial z_1}=\frac{\left( e^{z_1} \right) ^{'}\times \sum_k{e^{z_k}-e^{z_1}\times e^{z_1}}}{\left( \sum_k{e^{z_k}} \right) ^2}=\frac{e^{z_1}}{\sum_k{e^{z_k}}}-\frac{e^{z_1}}{\sum_k{e^{z_k}}}\times \frac{e^{z_1}}{\sum_k{e^{z_k}}}=\hat{y}_1\left( 1-\hat{y}_1 \right)
$$
由于交叉熵损失函数 $-\log \left( \hat{y}_1 \right)$ 对 $\hat{y}_1$ 求导的结果为 $-\frac{1}{\hat{y}_1}$,$\hat{y}_1\left( 1-\hat{y}_1 \right)$ 与 $-\frac{1}{\hat{y}_1}$ 相乘为 $\hat{y}_1-1$。这说明一旦得到模型预测输出 $\hat{y}_1$,将该输出减去1就是交叉损失函数相对于 $z_1$ 的偏导结果。
@@ -51,4 +51,4 @@ $$
在上面的例子中,假设所预测中间值 $(z_1,z_2,z_3)$ 经过 $Softmax$ 映射后所得结果为 $(0.34,0.46,0.20)$。由于已知输入数据 $x$ 属于第一类,显然这个输出不理想而需要对模型参数进行优化。如果选择交叉熵损失函数来优化模型,则 $(z_1,z_2,z_3)$ 这一层的偏导值为 $(0.34-1,0.46,0.20)= (-0.66,0.46,0.20)$。
-可以看出,$Softmax$ 和交叉熵损失函数相互结合,为偏导计算带来了极大便利。偏导计算使得损失误差从输出端向输入端传递,来对模型参数进行优化。在这里,交叉熵与$Softmax$ 函数结合在一起,因此也叫 $Softmax$ 损失(Softmax with cross-entropy loss)。
\ No newline at end of file
+可以看出,$Softmax$ 和交叉熵损失函数相互结合,为偏导计算带来了极大便利。偏导计算使得损失误差从输出端向输入端传递,来对模型参数进行优化。在这里,交叉熵与$Softmax$ 函数结合在一起,因此也叫 $Softmax$ 损失(Softmax with cross-entropy loss)。
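
A few lines of NumPy confirm the $\hat{y}-y$ shortcut; the logits below are hypothetical values chosen only so that softmax roughly reproduces the $(0.34, 0.46, 0.20)$ example:

```python
import numpy as np

z = np.array([0.80, 1.10, 0.27])     # hypothetical logits
y = np.array([1.0, 0.0, 0.0])        # the true class is the first one

y_hat = np.exp(z) / np.exp(z).sum()  # softmax
loss = -np.log(y_hat[0])             # cross-entropy for the true class

grad_z = y_hat - y                   # gradient of the loss w.r.t. the logits
print(np.round(y_hat, 2))            # [ 0.34  0.46  0.2 ]
print(np.round(grad_z, 2))           # [-0.66  0.46  0.2 ]
```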
diff --git a/docs/tutorials/deep_learning/loss_functions/CTC.md b/docs/tutorials/deep_learning/loss_functions/CTC.md
old mode 100644
new mode 100755
index 2f2da78f6..72e252061
--- a/docs/tutorials/deep_learning/loss_functions/CTC.md
+++ b/docs/tutorials/deep_learning/loss_functions/CTC.md
@@ -116,4 +116,4 @@ $$
具体的参数调整方法,可以阅读以下论文进行了解。
-延伸阅读:[Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks](http://www.cs.toronto.edu/~graves/icml_2006.pdf)
\ No newline at end of file
+延伸阅读:[Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks](http://www.cs.toronto.edu/~graves/icml_2006.pdf)
diff --git a/docs/tutorials/deep_learning/loss_functions/MSE.md b/docs/tutorials/deep_learning/loss_functions/MSE.md
old mode 100644
new mode 100755
index a20c7556d..c6310838d
--- a/docs/tutorials/deep_learning/loss_functions/MSE.md
+++ b/docs/tutorials/deep_learning/loss_functions/MSE.md
@@ -4,7 +4,7 @@
## 计算方式
-假设有 $n$ 个训练数据 $x_i$,每个训练数据 $x_i$ 的真实输出为 $y_i$,模型对 $x_i$ 的预测值为 $\hat{y}_i$。该模型在 $n$ 个训练数据下所产生的均方误差损失可定义如下:
+假设有 $n$ 个训练数据 $x_i$,每个训练数据 $x_i$ 的真实输出为 $y_i$,模型对 $x_i$ 的预测值为 $\hat{y}_i$。该模型在 $n$ 个训练数据下所产生的均方误差损失可定义如下:
$$
@@ -14,4 +14,4 @@ $$

-图1 MSE损失示意图
\ No newline at end of file
+图1 MSE损失示意图
diff --git a/docs/tutorials/deep_learning/loss_functions/index.rst b/docs/tutorials/deep_learning/loss_functions/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/metrics/GAN Evaluation.md b/docs/tutorials/deep_learning/metrics/GAN Evaluation.md
old mode 100644
new mode 100755
index 656265b52..524106db0
--- a/docs/tutorials/deep_learning/metrics/GAN Evaluation.md
+++ b/docs/tutorials/deep_learning/metrics/GAN Evaluation.md
@@ -32,7 +32,7 @@ $$\begin{equation} IS(G) = exp(E_{x\sim p_g}D_{KL}(p(y|x)||\widehat{p}(y)))\end{
其中,$x\sim p$:表示从生成器生成的图片;p(y|x):把生成的图片 x 输入到 Inception V3,得到一个 1000 维的向量 y ,即图片x属于各个类别的概率分布;
-$\widehat{p}(y)$:N 个生成的图片(N 通常取 5000),每个生成图片都输入到 Inception V3 中,各自得到一个的概率分布向量,然后求这些向量的平均,得到生成的图片在所有类别上的边缘分布,具体公式如下:
+$\widehat{p}(y)$:N 个生成的图片(N 通常取 5000),每个生成图片都输入到 Inception V3 中,各自得到一个的概率分布向量,然后求这些向量的平均,得到生成的图片在所有类别上的边缘分布,具体公式如下:
$$\begin{equation} \widehat{p}(y)=\frac{1}{N}\sum\limits_{i=1}^N p\left(y|x^\left(i\right)\right)\end{equation} \tag{2}$$
@@ -52,7 +52,7 @@ $$FID\left(P_r,P_g\right) = ||\mu_r-\mu_g|| + T_r\left(C_r+C_g-2\left(C_rC_g\rig
其中Tr 指的是被称为「迹」的线性代数运算(即方阵主对角线上的元素之和)。
-FID方法比较鲁棒,且计算高效。
+FID方法比较鲁棒,且计算高效。
### 其他评价方法
diff --git a/docs/tutorials/deep_learning/metrics/bleu.md b/docs/tutorials/deep_learning/metrics/bleu.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/metrics/evaluation_metric.md b/docs/tutorials/deep_learning/metrics/evaluation_metric.md
old mode 100644
new mode 100755
index 224330d59..8e855f950
--- a/docs/tutorials/deep_learning/metrics/evaluation_metric.md
+++ b/docs/tutorials/deep_learning/metrics/evaluation_metric.md
@@ -84,4 +84,3 @@ $$sensitivity =\frac{TP}{TP + FN}$$
$$specificity =\frac{TN}{TN + FP}$$
即无病(阴性)人群中,检测出阴性的几率。(检测出确实没病的能力)
-
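
Computed from a hypothetical confusion matrix, the two rates above look like this:

```python
TP, FN, TN, FP = 40, 10, 45, 5   # hypothetical confusion-matrix counts

sensitivity = TP / (TP + FN)     # recall on the positive (diseased) class
specificity = TN / (TN + FP)     # recall on the negative (healthy) class
print(sensitivity, specificity)  # 0.8 0.9
```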
diff --git a/docs/tutorials/deep_learning/metrics/index.rst b/docs/tutorials/deep_learning/metrics/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/metrics/mAP.md b/docs/tutorials/deep_learning/metrics/mAP.md
old mode 100644
new mode 100755
index 120292ff8..c8394a87b
--- a/docs/tutorials/deep_learning/metrics/mAP.md
+++ b/docs/tutorials/deep_learning/metrics/mAP.md
@@ -50,4 +50,4 @@
而最终mAP的计算方式其实可以分成如下两步:
- AP(Average Precision):某一类P-R曲线下的面积。
-- mAP(mean Average Precision):所有类别的AP值取平均。
\ No newline at end of file
+- mAP(mean Average Precision):所有类别的AP值取平均。
diff --git a/docs/tutorials/deep_learning/metrics/perplexity.md b/docs/tutorials/deep_learning/metrics/perplexity.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/metrics/rouge.md b/docs/tutorials/deep_learning/metrics/rouge.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/model_tuning/attention/attention_description.md b/docs/tutorials/deep_learning/model_tuning/attention/attention_description.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/model_tuning/attention/attention_varities.md b/docs/tutorials/deep_learning/model_tuning/attention/attention_varities.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/model_tuning/attention/classic_attention.md b/docs/tutorials/deep_learning/model_tuning/attention/classic_attention.md
old mode 100644
new mode 100755
index 39d711185..7c8547ec6
--- a/docs/tutorials/deep_learning/model_tuning/attention/classic_attention.md
+++ b/docs/tutorials/deep_learning/model_tuning/attention/classic_attention.md
@@ -15,8 +15,8 @@
更明确的讲,**图1**展示的是生成单词"machine"时的计算方式。首先将前一个时刻的输出状态 $q_2$ 和Encoder的输出 $h=[h_1,h_2,h_3,h_4]$ 进行Attention计算,得到一个当前时刻的 $context$ ,用公式可以这样组织:
$$
-\begin{align}
-[a_1,a_2,a_3,a_4] &= softmax([s(q_2, h_1), s(q_2,h_2),s(q_2, h_3),s(q_2, h_4)]) \\ context&=\sum_{i=1}^4 a_i \cdot h_i
+\begin{align}
+[a_1,a_2,a_3,a_4] &= softmax([s(q_2, h_1), s(q_2,h_2),s(q_2, h_3),s(q_2, h_4)]) \\ context&=\sum_{i=1}^4 a_i \cdot h_i
\end{align}
$$
@@ -60,4 +60,3 @@ $$
另外,当输入向量的维度比较高的时候,**点积模型**通常有比较大的方差,从而导致Softmax函数的梯度会比较小。因此**缩放点积模型**通过除以一个**平方根项**来平滑分数数值,也相当于平滑最终的**注意力分布**,缓解这个问题。
最后,**双线性模型**可以重塑为$s(h_i, q) = h^TWq=h^T(U^TV)q=(Uh)^T(Vq)$,即分别对查询向量 $q$ 和原始输入向量 $h$进行线性变换之后,再计算点积。相比点积模型,**双线性模型**在计算相似度时引入了非对称性。
-
diff --git a/docs/tutorials/deep_learning/model_tuning/attention/index.rst b/docs/tutorials/deep_learning/model_tuning/attention/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/model_tuning/attention/q b/docs/tutorials/deep_learning/model_tuning/attention/q
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/model_tuning/attention/self_attention.md b/docs/tutorials/deep_learning/model_tuning/attention/self_attention.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/model_tuning/batch_size.md b/docs/tutorials/deep_learning/model_tuning/batch_size.md
old mode 100644
new mode 100755
index 53f6ea971..aa1cfb655
--- a/docs/tutorials/deep_learning/model_tuning/batch_size.md
+++ b/docs/tutorials/deep_learning/model_tuning/batch_size.md
@@ -19,8 +19,3 @@
1. 更大的batch size会得到更精确的梯度估计值,但其估计梯度的回报是低于线性的。
2. 如果训练集较小,可以直接使用梯度下降法,batch size等于样本集大小。
3. `Deep Learning` 书中提到,在某些硬件上使用特定大小的数组时,运行时间会更少。尤其是在使用GPU时,通常使用2的幂数作为batch size可以获得更少的运行时间。
-
-
-
-
-
diff --git a/docs/tutorials/deep_learning/model_tuning/index.rst b/docs/tutorials/deep_learning/model_tuning/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/model_tuning/learning_rate.md b/docs/tutorials/deep_learning/model_tuning/learning_rate.md
old mode 100644
new mode 100755
index 9e4181102..6ba79512d
--- a/docs/tutorials/deep_learning/model_tuning/learning_rate.md
+++ b/docs/tutorials/deep_learning/model_tuning/learning_rate.md
@@ -31,8 +31,8 @@
```
boundaries = [100, 200] # 指定学习率改变的边界点为100和200
values = [1.0, 0.5, 0.1] # 指定不同区间下的学习率大小
-
- learning_rate = 1.0 if epoch < 100
+
+ learning_rate = 1.0 if epoch < 100
learning_rate = 0.5 if 100 <= epoch < 200
learning_rate = 0.1 if epoch >= 200
@@ -40,25 +40,25 @@
学习率随训练轮数成指数衰减,每次将当前学习率乘以给定的衰减率得到下一个学习率。指数衰减的公式可表示为:
-
+
$$
new\_learning\_rate = last\_learning\_rate * gamma
$$
其中,$gamma$ 为衰减率。
-
+
* 自然指数衰减 (Natural Exponential Decay)
每次将当前学习率乘以给定的衰减率的自然指数得到下一个学习率。其公式表达为:
-
+
$$
new\_learning\_rate = learning\_rate * e^{-gamma*epoch}
$$
其中,$learning\_rate$ 为初始学习率,$gamma$ 为衰减率,$epoch$ 为训练轮数。
-
+
* 多项式衰减(Polynomial Decay)
@@ -66,7 +66,7 @@
若 $cycle=True$,其计算公式为:
-
+
$$
\begin{align}
decay\_steps &= decay\_steps * math.ceil(\frac{epoch}{decay\_steps}) \\
@@ -75,7 +75,7 @@
$$
若 $cycle=False$,其计算公式为:
-
+
$$
\begin{align}
epoch &= min(epoch, decay\_steps) \\
@@ -84,7 +84,7 @@
$$
其中,$learning\_rate$ 为初始学习率,$decay\_step$ 为进行衰减的步长,$end\_lr$ 为最低学习率,$power$ 为多项式的幂。
-
+
* 间隔衰减 (Step Decay)
@@ -94,15 +94,15 @@
learning_rate = 0.5 # 学习率初始值
step_size = 30 # 每训练30个epoch进行一次衰减
gamma = 0.1 # 衰减率
-
-
- learning_rate = 0.5 if epoch < 30
+
+
+ learning_rate = 0.5 if epoch < 30
learning_rate = 0.05 if 30 <= epoch < 60
learning_rate = 0.005 if 60 <= epoch < 90
...
```
-
+
* 多间隔衰减(Multi Step Decay)
@@ -112,7 +112,7 @@
learning_rate = 0.5 # 学习率初始值
milestones = [30, 50] # 指定轮数间隔
gamma = 0.1 # 衰减率
-
+
learning_rate = 0.5 if epoch < 30
learning_rate = 0.05 if 30 <= epoch < 50
learning_rate = 0.005 if 50 <= epoch
@@ -123,13 +123,13 @@
学习率大小与当前衰减次数成反比。其计算公式如下:
-
+
$$
new\_learning\_rate = \frac{learning\_rate}{1 + gamma * epoch}
$$
其中,$learning\_rate$ 为初始学习率,$gamma$ 为衰减率,$epoch$ 为训练轮数。
-
+
* Lambda衰减(Lambda Decay)
@@ -138,7 +138,7 @@
```
learning_rate = 0.5 # 学习率初始值
lr_lambda = lambda epoch: 0.95 ** epoch # 定义lambda函数
-
+
learning_rate = 0.5 # 当epoch = 0时,0.5 * 0.95 ** 0 = 0.5
learning_rate = 0.475 # 当epoch = 1时,0.5 * 0.95 ** 1 = 0.475
learning_rate = 0.45125 # 当epoch = 2时,0.5 * 0.95 ** 2 = 0.45125
@@ -149,7 +149,7 @@
使用 `cosine annealing` 的策略来动态调整学习率,学习率随step数变化成余弦函数周期变化。该方法为论文 [SGDR:Stochastic Gradient Descent with Warm Restarts](https://arxiv.org/abs/1608.03983) 中`cosine annealing`动态学习率。学习率调整公式为:
-
+
$$
\begin{align}
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 + cos(\frac{T_{cur}}{T_{max}}\pi)), \quad T_{cur} \neq (2k+1)T_{max} \\
@@ -158,43 +158,40 @@
$$
其中,$\eta_{max}$的初始值为学习率的初始值,$T_{cur}$是SGDR训练过程中的当前训练轮数。
-
+
* 诺姆衰减(Noam Decay)
诺姆衰减的计算方式如下:
-
+
$$
new\_learning\_rate = learning\_rate * d_{mode}^{-0.5}*min(epoch^{-0.5}, epoch*warmup\_steps^{-1.5})
$$
其中,$d_{model}$ 代表模型的输入、输出向量特征维度,$warmup\_steps$ 为预热步数,$learning\_rate$ 为初始学习率。更多细节请参考 [attention is all you need](https://arxiv.org/pdf/1706.03762.pdf)。
-
+
* loss自适应衰减(Reduce On Plateau)
当loss停止下降时,降低学习率。其思想是:一旦模型表现不再提升,将学习率降低 2-10 倍对模型的训练往往有益。此外,每降低一次学习率后,将会进入一个冷静期。在冷静期内不会监控loss变化也不会进行衰减。当冷静期结束后,会继续监控loss的上升或下降。
-
+
* 线性学习率热身(Linear Warm Up)
线性学习率热身是一种学习率优化策略,在正常调整学习率前,先逐步增大学习率。
当训练步数小于热身步数(warmup_steps)时,学习率 $lr$ 按如下方式更新:
-
-
+
+
$$
lr = start\_lr + (end\_lr - start\_lr) * \frac{epoch}{warmup\_steps}
$$
当训练步数大于等于热身步数(warmup_steps)时,学习率 $lr$ 为:
-
-
+
+
$$
lr = learning\_rate
$$
其中,$lr$ 为热身之后的学习率,$start\_lr$ 为学习率初始值,$end\_lr$ 为最终学习率,$epoch$ 为训练轮数。
-
-
-
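
As a sanity check on two of the schedules above, here is a plain-Python sketch of step decay (using this page's example values) and linear warmup (with illustrative start_lr, end_lr, and warmup_steps):

```python
def step_decay(epoch, base_lr=0.5, step_size=30, gamma=0.1):
    """Multiply the learning rate by gamma every step_size epochs."""
    return base_lr * gamma ** (epoch // step_size)

def linear_warmup(epoch, start_lr=0.0, end_lr=0.5, warmup_steps=5):
    """Ramp linearly from start_lr to end_lr, then hold end_lr."""
    if epoch < warmup_steps:
        return start_lr + (end_lr - start_lr) * epoch / warmup_steps
    return end_lr

print([round(step_decay(e), 5) for e in (0, 30, 60)])  # [0.5, 0.05, 0.005]
print([round(linear_warmup(e), 1) for e in range(7)])  # [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.5]
```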
diff --git a/docs/tutorials/deep_learning/model_tuning/regularization/data_argumentation.md b/docs/tutorials/deep_learning/model_tuning/regularization/data_argumentation.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/model_tuning/regularization/dropconnect.md b/docs/tutorials/deep_learning/model_tuning/regularization/dropconnect.md
old mode 100644
new mode 100755
index a0383c203..a7f025956
--- a/docs/tutorials/deep_learning/model_tuning/regularization/dropconnect.md
+++ b/docs/tutorials/deep_learning/model_tuning/regularization/dropconnect.md
@@ -31,4 +31,4 @@ $$ u~N(pWv,p(1-p)(W*W)(v*v)) $$
由上面的过程可知,在进行inference时,需要对每个权重都进行sample,所以DropConnect速度会慢些。
-根据作者的观点,Dropout和DropConnect都类似模型平均,Dropout是$2^{|m|}$个模型的平均,而DropConnect是$2^{|M|}$个模型的平均。(m是向量,M是矩阵,取模表示矩阵或向量中对应元素的个数),从这点上来说,DropConnect模型平均能力更强,因为$|M|>|m|$
\ No newline at end of file
+根据作者的观点,Dropout和DropConnect都类似模型平均,Dropout是$2^{|m|}$个模型的平均,而DropConnect是$2^{|M|}$个模型的平均。(m是向量,M是矩阵,取模表示矩阵或向量中对应元素的个数),从这点上来说,DropConnect模型平均能力更强,因为$|M|>|m|$
diff --git a/docs/tutorials/deep_learning/model_tuning/regularization/dropout.md b/docs/tutorials/deep_learning/model_tuning/regularization/dropout.md
old mode 100644
new mode 100755
index 3074bd613..c7f4ae3e9
--- a/docs/tutorials/deep_learning/model_tuning/regularization/dropout.md
+++ b/docs/tutorials/deep_learning/model_tuning/regularization/dropout.md
@@ -80,7 +80,7 @@ droped_train22 = drop22(x2)
# 切换到eval模式
drop22.eval()
droped_eval22 = drop22(x2)
-
+
print('x1 {}, \n droped_train11 \n {}, \n droped_eval11 \n {}'.format(data1, droped_train11.numpy(), droped_eval11.numpy()))
print('x1 {}, \n droped_train12 \n {}, \n droped_eval12 \n {}'.format(data1, droped_train12.numpy(), droped_eval12.numpy()))
print('x2 {}, \n droped_train21 \n {}, \n droped_eval21 \n {}'.format(data2, droped_train21.numpy(), droped_eval21.numpy()))
@@ -90,21 +90,21 @@ print('x2 {}, \n droped_train22 \n {}, \n droped_eval22 \n {}'.format(data2, dro
程序运行结果如下:
```
- x1
+ x1
[[[[0.54340494 0.2783694 0.4245176] [0.84477615 0.00471886 0.12156912] [0.67074907 0.82585275 0.13670659]]
[[0.5750933 0.89132196 0.20920213] [0.18532822 0.10837689 0.21969749] [0.9786238 0.8116832 0.17194101]]
[[0.81622475 0.27407375 0.4317042 ] [0.9400298 0.81764936 0.33611196] [0.17541045 0.37283206 0.00568851]]]
[[[0.25242636 0.7956625 0.01525497] [0.5988434 0.6038045 0.10514768] [0.38194343 0.03647606 0.89041156]]
[[0.98092085 0.05994199 0.89054596] [0.5769015 0.7424797 0.63018394] [0.5818422 0.02043913 0.21002658]]
- [[0.5446849 0.76911515 0.25069523] [0.2858957 0.8523951 0.9750065 ] [0.8848533 0.35950786 0.59885895]]]]
- droped_train11
+ [[0.5446849 0.76911515 0.25069523] [0.2858957 0.8523951 0.9750065 ] [0.8848533 0.35950786 0.59885895]]]]
+ droped_train11
[[[[0. 0.2783694 0.4245176 ] [0. 0.00471886 0. ] [0. 0.82585275 0. ]]
[[0. 0. 0.20920213] [0.18532822 0.10837689 0. ] [0.9786238 0. 0.17194101]]
[[0.81622475 0.27407375 0. ] [0. 0. 0.33611196] [0.17541045 0.37283206 0.00568851]]]
[[[0.25242636 0. 0. ] [0.5988434 0.6038045 0.10514768] [0.38194343 0. 0.89041156]]
[[0.98092085 0. 0. ] [0.5769015 0.7424797 0. ] [0.5818422 0.02043913 0. ]]
- [[0.5446849 0.76911515 0. ] [0. 0.8523951 0.9750065 ] [0. 0.35950786 0.59885895]]]],
- droped_eval11
+ [[0.5446849 0.76911515 0. ] [0. 0.8523951 0.9750065 ] [0. 0.35950786 0.59885895]]]],
+ droped_eval11
[[[[0.27170247 0.1391847 0.2122588 ] [0.42238808 0.00235943 0.06078456] [0.33537453 0.41292638 0.0683533 ]]
[[0.28754666 0.44566098 0.10460106] [0.09266411 0.05418845 0.10984875] [0.4893119 0.4058416 0.08597051]]
[[0.40811238 0.13703687 0.2158521 ] [0.4700149 0.40882468 0.16805598] [0.08770522 0.18641603 0.00284425]]]
@@ -118,31 +118,31 @@ print('x2 {}, \n droped_train22 \n {}, \n droped_eval22 \n {}'.format(data2, dro
[[[0.25242636 0.7956625 0.01525497] [0.5988434 0.6038045 0.10514768] [0.38194343 0.03647606 0.89041156]]
[[0.98092085 0.05994199 0.89054596] [0.5769015 0.7424797 0.63018394] [0.5818422 0.02043913 0.21002658]]
[[0.5446849 0.76911515 0.25069523] [0.2858957 0.8523951 0.9750065 ] [0.8848533 0.35950786 0.59885895]]]]
- droped_train12
+ droped_train12
[[[[0. 0.5567388 0.8490352 ] [0. 0. 0.24313824] [0. 0. 0. ]]
[[0. 0. 0.41840425] [0.37065643 0. 0. ] [1.9572476 0. 0. ]]
[[0. 0. 0. ] [0. 1.6352987 0.6722239 ] [0.3508209 0. 0.01137702]]]
[[[0. 1.591325 0.03050994] [1.1976868 1.207609 0. ] [0.76388687 0. 1.7808231 ]]
[[0. 0. 0. ] [1.153803 0. 0. ] [1.1636844 0. 0.42005315]]
[[1.0893698 0. 0.50139046] [0.5717914 1.7047902 0. ] [0. 0.7190157 0. ]]]]
- droped_eval12
+ droped_eval12
[[[[0.54340494 0.2783694 0.4245176 ] [0.84477615 0.00471886 0.12156912] [0.67074907 0.82585275 0.13670659]]
[[0.5750933 0.89132196 0.20920213] [0.18532822 0.10837689 0.21969749] [0.9786238 0.8116832 0.17194101]]
[[0.81622475 0.27407375 0.4317042 ] [0.9400298 0.81764936 0.33611196] [0.17541045 0.37283206 0.00568851]]]
[[[0.25242636 0.7956625 0.01525497] [0.5988434 0.6038045 0.10514768] [0.38194343 0.03647606 0.89041156]]
[[0.98092085 0.05994199 0.89054596] [0.5769015 0.7424797 0.63018394] [0.5818422 0.02043913 0.21002658]]
[[0.5446849 0.76911515 0.25069523] [0.2858957 0.8523951 0.9750065 ] [0.8848533 0.35950786 0.59885895]]]]
- x2
- [[ 1. 2. 3.] [ 4. 5. 6.] [ 7. 8. 9.] [10. 11. 12.]],
- droped_train21
+ x2
+ [[ 1. 2. 3.] [ 4. 5. 6.] [ 7. 8. 9.] [10. 11. 12.]],
+ droped_train21
[[ 1. 2. 3.] [ 4. 5. 6.] [ 0. 0. 9.] [ 0. 11. 0.]]
- droped_eval21
+ droped_eval21
[[0.5 1. 1.5] [2. 2.5 3. ] [3.5 4. 4.5] [5. 5.5 6. ]]
- x2
+ x2
[[ 1. 2. 3.] [ 4. 5. 6.] [ 7. 8. 9.] [10. 11. 12.]]
- droped_train22
+ droped_train22
[[ 2. 0. 6.] [ 0. 10. 0.] [14. 16. 18.] [ 0. 22. 24.]]
- droped_eval22
+ droped_eval22
[[ 1. 2. 3.] [ 4. 5. 6.] [ 7. 8. 9.] [10. 11. 12.]]
```
@@ -194,4 +194,4 @@ x_2=\left[\begin{array}{ccc}
7 & 8 & 9 \\
10 & 11 & 12
\end{array}\right]
-$$
\ No newline at end of file
+$$
diff --git a/docs/tutorials/deep_learning/model_tuning/regularization/early_stop.md b/docs/tutorials/deep_learning/model_tuning/regularization/early_stop.md
old mode 100644
new mode 100755
index 93b6ea5d5..f81122699
--- a/docs/tutorials/deep_learning/model_tuning/regularization/early_stop.md
+++ b/docs/tutorials/deep_learning/model_tuning/regularization/early_stop.md
@@ -6,4 +6,4 @@

-在上图中,我们在虚线处停止模型的训练,因为在此处之后模型会开始在训练数据上过拟合。
\ No newline at end of file
+在上图中,我们在虚线处停止模型的训练,因为在此处之后模型会开始在训练数据上过拟合。
diff --git a/docs/tutorials/deep_learning/model_tuning/regularization/index.rst b/docs/tutorials/deep_learning/model_tuning/regularization/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/model_tuning/regularization/l1l2.md b/docs/tutorials/deep_learning/model_tuning/regularization/l1l2.md
old mode 100644
new mode 100755
index 1ef66a2cb..62c491dca
--- a/docs/tutorials/deep_learning/model_tuning/regularization/l1l2.md
+++ b/docs/tutorials/deep_learning/model_tuning/regularization/l1l2.md
@@ -68,4 +68,4 @@ $$arg min_{\theta} lnL(\theta)=\sum_{i=1}^n (y_i-\theta^T x_{i})+\lambda \sum_{j
上式正好是线性回归问题在L1范数正则下的代价函数,故验证了结论。
-如果误差符合0均值的高斯分布,那么最大似然估计法的结果就是最小二乘法,这也是为何误差定义经常使用$\sum_{i=1}^n (y_{i}-\theta^Tx_{i})^2$的原因,因为这个公式是基于概率推导出来的
\ No newline at end of file
+如果误差符合0均值的高斯分布,那么最大似然估计法的结果就是最小二乘法,这也是为何误差定义经常使用$\sum_{i=1}^n (y_{i}-\theta^Tx_{i})^2$的原因,因为这个公式是基于概率推导出来的
diff --git a/docs/tutorials/deep_learning/model_tuning/regularization/regularization.md b/docs/tutorials/deep_learning/model_tuning/regularization/regularization.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/model_tuning/weight_initializer.md b/docs/tutorials/deep_learning/model_tuning/weight_initializer.md
old mode 100644
new mode 100755
index 23dac989a..222fc86a7
--- a/docs/tutorials/deep_learning/model_tuning/weight_initializer.md
+++ b/docs/tutorials/deep_learning/model_tuning/weight_initializer.md
@@ -96,7 +96,7 @@ $$
假设在一个神经网络中,对于一层线性网络,其表示为:
-
+
$$
y = f(z_1W_1 + z_2W_2 + z_3W_3 + ... + z_iW_i + b)
@@ -105,21 +105,21 @@ $$
对于其中的每个 $z_iW_i$,其方差为:
-
+
$$
Var(z_iW_i) = E(z_i)^2Var(W_i) + E(W_i)^2Var(z_i)+Var(z_i)Var(W_i)
$$
由于 $W_i$ 和 $z_i$ 的均值都为0,因此可以得到:
-
+
$$
Var(z_iW_i) = Var(z_i)Var(W_i)
$$
又因为 $z$ 和 $W$ 相互独立,则有:
-
+
$$
Var(y) = n_i * Var(z_i)Var(W_i)
@@ -128,7 +128,7 @@ $$
通过上面的公式我们可以发现,输入 $z_i$ 的方差和输出 $y$ 方差相差 $n * Var(W_i)$ 倍,也就是说输入信号在经过神经元后会被放大或缩小 $n * Var(W_i)$ 倍。为保证经过多层网络后,信号不被过分的放大或缩小,我们需要尽可能保证前向传播和反向传播时每层方差保持一致,则有:
-
+
$$
{\forall}i, \quad n_i * Var(W_i) = 1\\
@@ -136,14 +136,14 @@ $$
$$
权衡上述两个限制,提出一个折中的办法:
-
+
$$
{\forall}i, \quad Var(W_i) = \frac{2}{n_i + n_{i+1}}
$$
根据计算出的理想方差,可选择通过高斯分布或均匀分布来随机初始化参数。若采用高斯分布,则权重可按照 $N(0, \frac{2}{n_i + n_{i+1}})$ 的高斯分布来进行初始化。若采用在区间 $[-r, r]$ 的均匀分布进行初始化,则初始化分布有:
-
+
$$
W \sim U[- \frac{\sqrt 6}{\sqrt{n_i + n_{i+1}}}, \frac{\sqrt 6}{\sqrt{n_i + n_{i+1}}}]
@@ -158,28 +158,28 @@ $$
kaiming初始化是一种针对ReLU的初始化方法,假定使用ReLU激活函数时,网络每一层都中有一半的神经元被激活,另一半为0,因此其分布的方差也近似为恒等函数的一半。这样在考虑前向传播和反向传播时则有:
-
-
+
+
$$
{\forall}i, \quad \frac12 n_i * Var(W_i) = 1\\
{\forall}i, \quad \frac12 n_{i+1} * Var(W_i) = 1
$$
$W_i$ 的理想方差为:
-
-
-
+
+
+
$$
{\forall}i, \quad Var(W_i) = \frac{2}{n_i}
$$
-
+
当采用高斯分布时,则权重可按照 $N(0, \frac{2}{n_i})$ 的高斯分布来进行初始化。若采用在区间 $[-r, r]$ 的均匀分布进行初始化,则初始化分布有:
-
-
+
+
$$
W \sim U[- \frac{\sqrt 6}{\sqrt{n_i}}, \frac{\sqrt 6}{\sqrt{n_i}}]
$$
-
-
+
+
具体论文参见:[Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/pdf/1502.01852.pdf)
@@ -187,6 +187,3 @@ $$
## References
1. Bradley, D. (2009). *Learning in modular systems.* Doctoral dissertation, The Robotics Institute, Carnegie Mellon University.
-
-
-
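
To make the two uniform-initialization bounds concrete, the sketch below draws Xavier and Kaiming weights for a layer with assumed fan-in 256 and fan-out 128, then checks the empirical variances against the targets derived above:

```python
import math

import numpy as np

fan_in, fan_out = 256, 128   # assumed layer sizes

# Xavier: Var(W) = 2/(fan_in + fan_out), i.e. U[-r, r] with r = sqrt(6/(fan_in + fan_out))
r_xavier = math.sqrt(6.0 / (fan_in + fan_out))
W_xavier = np.random.uniform(-r_xavier, r_xavier, size=(fan_in, fan_out))

# Kaiming (ReLU): Var(W) = 2/fan_in, i.e. U[-r, r] with r = sqrt(6/fan_in)
r_kaiming = math.sqrt(6.0 / fan_in)
W_kaiming = np.random.uniform(-r_kaiming, r_kaiming, size=(fan_in, fan_out))

# Empirical variances should sit near 2/(fan_in + fan_out) and 2/fan_in
print(W_xavier.var(), 2 / (fan_in + fan_out))
print(W_kaiming.var(), 2 / fan_in)
```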
diff --git a/docs/tutorials/deep_learning/normalization/Layer_Normalization.md b/docs/tutorials/deep_learning/normalization/Layer_Normalization.md
old mode 100644
new mode 100755
index f48decadf..195fa36f9
--- a/docs/tutorials/deep_learning/normalization/Layer_Normalization.md
+++ b/docs/tutorials/deep_learning/normalization/Layer_Normalization.md
@@ -44,7 +44,7 @@ $$
2. 控制梯度爆炸和防止梯度消失
我们常用的梯度传递的方式是由深层神经元往浅层传播,如果用$f_{i}^\prime$和$O_i^\prime$分别表示第$i$层对应的激活层导数和输出导数,那么对于$H$层的神经网络,第一层的导数$F_1^\prime=\prod_{i=1}^{H}f_i^\prime*O_i^\prime$,那么对于$f_i^\prime*O_i^\prime$恒大于1的情况,如$f_i^\prime*O_i^\prime\equiv2$的情况,使得结果指数上升,发生梯度爆炸,对于$f_i^\prime*O_i^\prime$恒小于1,如$f_i^\prime*O_i^\prime\equiv0.25$导致结果指数下降,发生梯度消失的现象,底层神经元梯度几乎为0。采用归一化算法后,可以使得$f_i^\prime*O_i^\prime$的结果不会太大也不会太小,有利于控制梯度的传播。
-## **paddle中的API**
+## **paddle中的API**
`paddle.nn.LayerNorm(normalized_shape, epsilon=1e-05, weight_attr=None, bias_attr=None, name=None);`
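
A minimal usage sketch of this API; the input shape is an arbitrary example, and `normalized_shape` must match the trailing dimensions being normalized:

```python
import paddle

x = paddle.randn([8, 16, 32])  # (batch, seq_len, features), arbitrary shape
layer_norm = paddle.nn.LayerNorm(normalized_shape=[16, 32])

out = layer_norm(x)  # each sample is normalized over its own last two dims
print(out.shape)     # [8, 16, 32]
```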
@@ -129,4 +129,4 @@ print(layer_norm_out)
>
> [5]王岩. 深度神经网络的归一化技术研究[D].南京邮电大学,2019.
>
-> [6]https://zhuanlan.zhihu.com/p/75603087
\ No newline at end of file
+> [6]https://zhuanlan.zhihu.com/p/75603087
diff --git a/docs/tutorials/deep_learning/normalization/basic_normalization.md b/docs/tutorials/deep_learning/normalization/basic_normalization.md
old mode 100644
new mode 100755
index 21c93cf8a..6ccc3a008
--- a/docs/tutorials/deep_learning/normalization/basic_normalization.md
+++ b/docs/tutorials/deep_learning/normalization/basic_normalization.md
@@ -14,7 +14,7 @@
2. 数据归一化后,寻求最优解的过程会变得平缓,可以更快速的收敛到最优解。详解请参见`3.为什么归一化能提高求解最优解的速度`。
-
+
## 3. 为什么归一化能提高求解最优解的速度
@@ -57,26 +57,26 @@ $$
## 4. 归一化有哪些类型
1. Min-max normalization (Rescaling):
-
-
+
+
$$
x^{'} = \frac{x - min(x)}{max(x) - min(x)}
$$
归一化后的数据范围为 [0, 1],其中 $min(x)、 max(x)$ 分别求样本数据的最小值和最大值。
-
+
2. Mean normalization:
-
+
$$
x^{'} = \frac{x - mean(x)}{max(x) - min(x)}
$$
归一化后的数据范围为 [-1, 1],其中 $mean(x)$ 为样本数据的平均值。
-
+
3. Z-score normalization (Standardization):
-
+
$$
x^{'} = \frac{x - \mu}{\sigma}
$$
@@ -86,29 +86,29 @@ $$
* 对数归一化:
-
+
$$
x^{'} = \frac{\lg x}{\lg max(x)}
$$
* 反正切函数归一化:
-
-
+
+
$$
x^{'} = \arctan(x) * \frac{2}{\pi}
$$
归一化后的数据范围为 [-1, 1]
-
+
* 小数定标标准化(Demical Point Normalization):
-
-
+
+
$$
x^{'} = \frac{x}{10^j}
$$
归一化后的数据范围为 [-1, 1],$j$ 为使$max(|x^{'}|) < 1$的最小整数。
-
+
## 5. 不同归一化的使用条件
@@ -119,7 +119,7 @@ $$
3. 非线性归一化通常被用在数据分化程度较大的场景,有时需要通过一些数学函数对原始值进行映射,如对数、反正切等。
-
+
***
@@ -155,12 +155,3 @@ $$
## References
【1】Comparative Analysis of KNN Algorithm using Various Normalization Techniques;Amit Pandey,Achin Jain.
-
-
-
-
-
-
-
-
-
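
The first three normalizations above, applied to a small made-up sample in NumPy:

```python
import numpy as np

x = np.array([1.0, 2.0, 4.0, 8.0])                # made-up sample

min_max = (x - x.min()) / (x.max() - x.min())     # range [0, 1]
mean_norm = (x - x.mean()) / (x.max() - x.min())  # range [-1, 1]
z_score = (x - x.mean()) / x.std()                # mean 0, std 1

print(np.round(min_max, 3))                       # [0.    0.143 0.429 1.   ]
print(round(z_score.mean(), 6), round(z_score.std(), 6))  # ~0.0 and 1.0
```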
diff --git a/docs/tutorials/deep_learning/normalization/index.rst b/docs/tutorials/deep_learning/normalization/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/optimizers/adabound.md b/docs/tutorials/deep_learning/optimizers/adabound.md
old mode 100644
new mode 100755
index be2b4256d..92625a002
--- a/docs/tutorials/deep_learning/optimizers/adabound.md
+++ b/docs/tutorials/deep_learning/optimizers/adabound.md
@@ -20,5 +20,3 @@ $$\eta_{t}=\hat \eta_{t}/\sqrt{t}$$
$$\theta_{t+1}=\theta_{t}-\eta_{t} \odot m_{t}$$
在这种设置下,AdaBound在最开始表现的像Adam,因为最开始学习率的边界对更新公式影响很小,渐渐的表现的像SGD+momentum,因为学习率逐渐被限制住了。
-
-
diff --git a/docs/tutorials/deep_learning/optimizers/adadelta.md b/docs/tutorials/deep_learning/optimizers/adadelta.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/optimizers/adagrad.md b/docs/tutorials/deep_learning/optimizers/adagrad.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/optimizers/adam.md b/docs/tutorials/deep_learning/optimizers/adam.md
old mode 100644
new mode 100755
index 6e024dfaa..ad2847c82
--- a/docs/tutorials/deep_learning/optimizers/adam.md
+++ b/docs/tutorials/deep_learning/optimizers/adam.md
@@ -40,8 +40,8 @@ Adam在很多情况下算作默认工作性能比较优秀的优化器。
+ 可能不收敛:二阶动量是固定时间窗口内的累积,随着时间窗口的变化,遇到的数据可能发生巨变,使得$V_{t}$可能会时大时小,不是单调变化。这就可能在训练后期引起学习率的震荡,导致模型无法收敛。
- 修正的方法。由于Adam中的学习率主要是由二阶动量控制的,为了保证算法的收敛,可以对二阶动量的变化进行控制,避免上下波动。
-
- $$v_{t}=max(\beta_{2} \cdot v_{t-1}+ (1-\beta_{2})g_{t}^2,v_{t-1})$$
-
+ 修正的方法。由于Adam中的学习率主要是由二阶动量控制的,为了保证算法的收敛,可以对二阶动量的变化进行控制,避免上下波动。
+
+ $$v_{t}=max(\beta_{2} \cdot v_{t-1}+ (1-\beta_{2})g_{t}^2,v_{t-1})$$
+
+ 可能错过全局最优解:自适应学习率算法可能会对前期出现的特征过拟合,后期才出现的特征很难纠正前期的拟合效果。后期Adam的学习率太低,影响了有效的收敛。
diff --git a/docs/tutorials/deep_learning/optimizers/adamax.md b/docs/tutorials/deep_learning/optimizers/adamax.md
old mode 100644
new mode 100755
index 1a2cbc0d6..d27c1af4a
--- a/docs/tutorials/deep_learning/optimizers/adamax.md
+++ b/docs/tutorials/deep_learning/optimizers/adamax.md
@@ -19,5 +19,3 @@ $$u_{t}=\beta_{2}^{\infty}v_{t-1}+(1-\beta_{2}^{\infty})|g_{t}|^{\infty}
$$\theta_{t+1}=\theta_{t}-\frac{\eta}{u_t} \hat m_{t}$$
其中$u_{t}$依赖于 max 操作,这不像Adam中的$m_{t}$和$v_{t}$那样容易趋于0,这也是我们不需要为$u_{t}$计算偏差纠正的原因。建议的默认值是$\eta=0.002$,$\beta_{1}=0.9$和$\beta_{2}=0.999$
-
-
diff --git a/docs/tutorials/deep_learning/optimizers/adamw.md b/docs/tutorials/deep_learning/optimizers/adamw.md
old mode 100644
new mode 100755
index 5e30a435b..e5301527c
--- a/docs/tutorials/deep_learning/optimizers/adamw.md
+++ b/docs/tutorials/deep_learning/optimizers/adamw.md
@@ -27,4 +27,3 @@ $$v_{t}=\beta_{2}v_{t-1}+(1-\beta_{2})(\nabla L(\theta_{t-1}))^2$$
$$\theta_{t}=\theta_{t-1}-\eta(\frac{1}{\sqrt{\hat v_{t}}+\epsilon}\hat m_{t}-\gamma\theta_{t-1})$$
从上面的公式可以看出,AdamW本质上就是在损失函数里面加入了L2正则项,然后计算梯度和更新参数的时候都需要考虑这个正则项。AdamW使用在hugging face版的transformer中,BERT,XLNET,ELECTRA等主流的NLP模型,都是用了AdamW优化器
-
diff --git a/docs/tutorials/deep_learning/optimizers/amsgrad.md b/docs/tutorials/deep_learning/optimizers/amsgrad.md
old mode 100644
new mode 100755
index 0eb47f17a..edca1cd56
--- a/docs/tutorials/deep_learning/optimizers/amsgrad.md
+++ b/docs/tutorials/deep_learning/optimizers/amsgrad.md
@@ -15,4 +15,3 @@ $$\hat v_{t}=max(\hat v_{t-1},v_{t})$$
$$\theta_{t+1}=\theta_{t}-\frac{\eta}{\sqrt{\hat v_{t}}+\epsilon}m_{t}$$
从上面的公式可以看出,参数更新公式与Adam没有啥区别,但是求$\hat v_{t}$有区别。AMSGRAD不增加步长,避免了ADAM和RMSPROP算法的缺陷。
-
diff --git a/docs/tutorials/deep_learning/optimizers/gd.md b/docs/tutorials/deep_learning/optimizers/gd.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/optimizers/index.rst b/docs/tutorials/deep_learning/optimizers/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/optimizers/lookahead.md b/docs/tutorials/deep_learning/optimizers/lookahead.md
old mode 100644
new mode 100755
index 22cce0870..2ee0f220c
--- a/docs/tutorials/deep_learning/optimizers/lookahead.md
+++ b/docs/tutorials/deep_learning/optimizers/lookahead.md
@@ -7,13 +7,13 @@ Lookahead的算法描述如下:
1. 初始化参数$\phi_{0}$和目标函数L
2. 同步周期k,slow权重步长$alpha$和优化器A
- 3. for t=1,2,...
- 4. 同步参数$\theta_{t,0}=\phi_{t-1}$
- 5. for i=1,2,...,k
- 6. 采样一个minibatch的数据:$d \sim D$
- 7. $\theta_{t,i}=\theta_{t,i-1}+A(L,\theta_{t,i-1},d)$
- 8. 外部更新$\phi_{t}=\phi_{t-1}+\alpha(\theta_{t,k}-\phi_{t-1})$
- 返回参数
+ 3. for t=1,2,...
+ 4. 同步参数$\theta_{t,0}=\phi_{t-1}$
+ 5. for i=1,2,...,k
+ 6. 采样一个minibatch的数据:$d \sim D$
+ 7. $\theta_{t,i}=\theta_{t,i-1}+A(L,\theta_{t,i-1},d)$
+ 8. 外部更新$\phi_{t}=\phi_{t-1}+\alpha(\theta_{t,k}-\phi_{t-1})$
+ 返回参数
+ Fast weights
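
Flattened into NumPy, the pseudo-code above might look like the following, with plain SGD as the inner optimizer A and a toy quadratic objective; k, alpha, and the inner learning rate are arbitrary choices:

```python
import numpy as np

def grad(theta):                       # toy objective: f(theta) = ||theta||^2 / 2
    return theta

phi = np.array([5.0, -3.0])            # slow weights phi_0
k, alpha, inner_lr = 5, 0.5, 0.1

for t in range(10):                    # outer loop
    theta = phi.copy()                 # sync: theta_{t,0} = phi_{t-1}
    for i in range(k):                 # k fast steps with the inner optimizer A
        theta -= inner_lr * grad(theta)
    phi = phi + alpha * (theta - phi)  # slow update toward the fast weights
print(phi)                             # approaches the minimum at the origin
```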
diff --git a/docs/tutorials/deep_learning/optimizers/momentum.md b/docs/tutorials/deep_learning/optimizers/momentum.md
old mode 100644
new mode 100755
index 38161a56c..f825f345e
--- a/docs/tutorials/deep_learning/optimizers/momentum.md
+++ b/docs/tutorials/deep_learning/optimizers/momentum.md
@@ -19,4 +19,4 @@ momentum能够加速SGD方法,并且能够减少震荡,如下图:
**特点**
+ 加入了动量因素,SGD-M缓解了SGD在局部最优点梯度为0,无法持续更新的问题和振荡幅度过大的问题。
-+ 当局部沟壑比较深,动量加持用完了,依然会困在局部最优里来回振荡
\ No newline at end of file
++ 当局部沟壑比较深,动量加持用完了,依然会困在局部最优里来回振荡
diff --git a/docs/tutorials/deep_learning/optimizers/nadam.md b/docs/tutorials/deep_learning/optimizers/nadam.md
old mode 100644
new mode 100755
index 7bf88d047..134c780f7
--- a/docs/tutorials/deep_learning/optimizers/nadam.md
+++ b/docs/tutorials/deep_learning/optimizers/nadam.md
@@ -42,4 +42,4 @@ $$\theta_{t+1}=\theta_{t}-\frac{\eta}{\sqrt{\hat v_{t-1}}+\epsilon}(\beta_{1}\ha
这个方程跟momentum的展开式类似,用$\hat m_{t-1}$替换$\hat m_{t-2}$,Nadam的更新规则为:
-$$\theta_{t+1}=\theta_{t}-\frac{\eta}{\sqrt{\hat v_{t}}+\epsilon}(\beta_{1}\hat m_{t}+\frac{(1-\beta_{1})g_{t}}{1-\beta_{1}^t})$$
\ No newline at end of file
+$$\theta_{t+1}=\theta_{t}-\frac{\eta}{\sqrt{\hat v_{t}}+\epsilon}(\beta_{1}\hat m_{t}+\frac{(1-\beta_{1})g_{t}}{1-\beta_{1}^t})$$
diff --git a/docs/tutorials/deep_learning/optimizers/nag.md b/docs/tutorials/deep_learning/optimizers/nag.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/deep_learning/optimizers/radam.md b/docs/tutorials/deep_learning/optimizers/radam.md
old mode 100644
new mode 100755
index c01202b54..b1409da51
--- a/docs/tutorials/deep_learning/optimizers/radam.md
+++ b/docs/tutorials/deep_learning/optimizers/radam.md
@@ -27,4 +27,3 @@ $$\theta_{t}=\theta_{t-1}-\alpha_{t} r_{t}\hat m_{t} l_{t}$$
如果方差不容易得到(tractable),我们采用下面的公式:
$$\theta_{t}=\theta_{t-1}-\alpha_{t} \hat m_{t}$$
-
diff --git a/docs/tutorials/deep_learning/optimizers/rmsprop.md b/docs/tutorials/deep_learning/optimizers/rmsprop.md
old mode 100644
new mode 100755
index 2c12d0e2b..81d71117f
--- a/docs/tutorials/deep_learning/optimizers/rmsprop.md
+++ b/docs/tutorials/deep_learning/optimizers/rmsprop.md
@@ -7,4 +7,4 @@ $$E[g^2]_{t}=0.9 E[g^2]_{t-1}+0.1 g_{t}^2$$
RMSProp参数更新公式如下,其中$\eta$是学习率, $g_{t}$是当前参数的梯度
$$\theta_{t+1}=\theta_{t}-\frac{\eta}{\sqrt{E[g^2]_{t}+\epsilon}}g_{t}$$
-RMSprop将学习速率除以梯度平方的指数衰减平均值。Hinton建议$\gamma$设置为0.9,默认学习率$\eta$为0.001
\ No newline at end of file
+RMSprop将学习速率除以梯度平方的指数衰减平均值。Hinton建议$\gamma$设置为0.9,默认学习率$\eta$为0.001
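
The two equations above, iterated for a few steps in NumPy on a toy quadratic objective; the parameter values are arbitrary and the constants are the suggested defaults:

```python
import numpy as np

eta, gamma, eps = 0.001, 0.9, 1e-8  # suggested defaults

theta = np.array([1.0, -2.0])       # arbitrary parameters
Eg2 = np.zeros_like(theta)          # running average E[g^2]

for _ in range(3):                  # a few illustrative steps
    g = 2 * theta                   # gradient of f(theta) = ||theta||^2
    Eg2 = gamma * Eg2 + (1 - gamma) * g ** 2
    theta = theta - eta / np.sqrt(Eg2 + eps) * g
print(theta)
```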
diff --git a/docs/tutorials/generative_adversarial_network/basic_concept/Collapse.md b/docs/tutorials/generative_adversarial_network/basic_concept/Collapse.md
old mode 100644
new mode 100755
index 75ee7266a..755fe167d
--- a/docs/tutorials/generative_adversarial_network/basic_concept/Collapse.md
+++ b/docs/tutorials/generative_adversarial_network/basic_concept/Collapse.md
@@ -13,4 +13,4 @@
2)Mini-batch discrimination:在判别器的中间层建立一个mini-batch layer用于计算基于L1距离的样本统计量,通过建立该统计量,实现了一个batch内某个样本与其他样本有多接近。这个信息可以被判别器利用到,从而甄别出哪些缺乏多样性的样本。对生成器而言,则要试图生成具有多样性的样本;
-3)使用GAN改进算法:Multi agent diverse GAN(MAD-GAN)采用多个生成器,一个判别器以保障样本生成的多样性等。
\ No newline at end of file
+3)使用GAN改进算法:Multi agent diverse GAN(MAD-GAN)采用多个生成器,一个判别器以保障样本生成的多样性等。
diff --git a/docs/tutorials/generative_adversarial_network/basic_concept/Discriminator.md b/docs/tutorials/generative_adversarial_network/basic_concept/Discriminator.md
old mode 100644
new mode 100755
index 006fb343e..4521798eb
--- a/docs/tutorials/generative_adversarial_network/basic_concept/Discriminator.md
+++ b/docs/tutorials/generative_adversarial_network/basic_concept/Discriminator.md
@@ -17,4 +17,4 @@
5)再经过两个全连接层fc1和fc2,得到原始图像的向量表达;
-6)最后通过Sigmoid激活函数,输出判别概率,即图片是真是假的二分类结果。
\ No newline at end of file
+6)最后通过Sigmoid激活函数,输出判别概率,即图片是真是假的二分类结果。
diff --git a/docs/tutorials/generative_adversarial_network/basic_concept/GAN loss.md b/docs/tutorials/generative_adversarial_network/basic_concept/GAN loss.md
old mode 100644
new mode 100755
index f82edb55e..5ad514bca
--- a/docs/tutorials/generative_adversarial_network/basic_concept/GAN loss.md
+++ b/docs/tutorials/generative_adversarial_network/basic_concept/GAN loss.md
@@ -18,4 +18,4 @@ $\mathop{min}\limits_{G}\mathop{max}\limits_{D}V(D,G)$表示固定判别器D训
* $z\sim p_{z}(z)$:表示高斯分布的样本,即噪声;
* D(x)代表x为真实图片的概率,如果为1,就代表100%是真实的图片,而输出为0,就代表不可能是真实的图片。
-等式的右边其实就是将等式左边的交叉商损失公式展开,并写成概率分布的期望形式。详细的推导请参见原论文[Generative Adversarial Nets](https://arxiv.org/pdf/1406.2661.pdf)。
\ No newline at end of file
+等式的右边其实就是将等式左边的交叉商损失公式展开,并写成概率分布的期望形式。详细的推导请参见原论文[Generative Adversarial Nets](https://arxiv.org/pdf/1406.2661.pdf)。
diff --git a/docs/tutorials/generative_adversarial_network/basic_concept/GAN train.md b/docs/tutorials/generative_adversarial_network/basic_concept/GAN train.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/generative_adversarial_network/basic_concept/Game theory.md b/docs/tutorials/generative_adversarial_network/basic_concept/Game theory.md
old mode 100644
new mode 100755
index 5332c7fa1..9d2dec9d7
--- a/docs/tutorials/generative_adversarial_network/basic_concept/Game theory.md
+++ b/docs/tutorials/generative_adversarial_network/basic_concept/Game theory.md
@@ -8,4 +8,4 @@
- **游戏**:一般来说,游戏是由一组玩家,行动/策略和最终收益组成。例如:拍卖、象棋、政治等。
- **玩家**:玩家是参与任何游戏的理性实体。例如:在拍卖会的投标人、石头剪刀布的玩家、参加选举的政治家等。
-- **收益**:收益是所有玩家在获得特定结果时所获得的奖励。它可以是正的,也可以是负的。正如我们之前所讨论的,每个代理都是自私的,并且想要最大化他们的收益:
\ No newline at end of file
+- **收益**:收益是所有玩家在获得特定结果时所获得的奖励。它可以是正的,也可以是负的。正如我们之前所讨论的,每个代理都是自私的,并且想要最大化他们的收益:
diff --git a/docs/tutorials/generative_adversarial_network/basic_concept/Generator.md b/docs/tutorials/generative_adversarial_network/basic_concept/Generator.md
old mode 100644
new mode 100755
index 9c5a6d7f3..89d54e2db
--- a/docs/tutorials/generative_adversarial_network/basic_concept/Generator.md
+++ b/docs/tutorials/generative_adversarial_network/basic_concept/Generator.md
@@ -21,4 +21,3 @@
> Tips:全连接层作用:维度变换,变为高维,方便将噪声向量放大。因为全连接层计算量稍大,后序改进的GAN移除全连接层。
> Tips:最后一层激活函数通常使用tanh():既起到激活作用,又起到归一作用,将生成器的输出归一化至[-1,1],作为判别器的输入。也使GAN的训练更稳定,收敛速度更快,生成质量确实更高。
-
diff --git a/docs/tutorials/generative_adversarial_network/basic_concept/Input noise.md b/docs/tutorials/generative_adversarial_network/basic_concept/Input noise.md
old mode 100644
new mode 100755
index 606e011d7..35cd46a44
--- a/docs/tutorials/generative_adversarial_network/basic_concept/Input noise.md
+++ b/docs/tutorials/generative_adversarial_network/basic_concept/Input noise.md
@@ -9,4 +9,3 @@ GAN生成器Generator的输入是随机噪声,目的是每次生成不同的
引入随机噪声使得生成的图片具有多样性,比如下图不同的噪声z可以产生不同的数字:

-
diff --git a/docs/tutorials/generative_adversarial_network/basic_concept/Nash equilibrium.md b/docs/tutorials/generative_adversarial_network/basic_concept/Nash equilibrium.md
old mode 100644
new mode 100755
index 4e8f118ba..f83fd76b6
--- a/docs/tutorials/generative_adversarial_network/basic_concept/Nash equilibrium.md
+++ b/docs/tutorials/generative_adversarial_network/basic_concept/Nash equilibrium.md
@@ -22,8 +22,3 @@
所以无论B是否招供,A只要招供了,对A而言是最优的策略。
同上,嫌疑犯B想法也是相同的,都依据各自的理性而选择招供,这种情况就被称为纳什均衡点。
-
-
-
-
-
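可以用一个极小的数值例子验证"双方都招供"是纳什均衡(收益矩阵取常见的囚徒困境设定,刑期记为负收益,具体数字为假设):

```python
import numpy as np

# 行为 A 的策略,列为 B 的策略;0=招供, 1=不招供
# payoff_A[i][j] 为 A 的收益(刑期取负数,越大越好),payoff_B 同理
payoff_A = np.array([[-5, 0], [-10, -1]])
payoff_B = np.array([[-5, -10], [0, -1]])

def is_nash(i, j):
    """(i, j) 是纳什均衡 <=> 任何一方单独改变策略都不会获得更高收益。"""
    a_best = payoff_A[i, j] >= payoff_A[:, j].max()
    b_best = payoff_B[i, j] >= payoff_B[i, :].max()
    return a_best and b_best

for i in range(2):
    for j in range(2):
        print((i, j), "纳什均衡" if is_nash(i, j) else "-")
# 只有 (0, 0),即双方都招供,是纳什均衡
```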
diff --git a/docs/tutorials/generative_adversarial_network/basic_concept/Unstable training.md b/docs/tutorials/generative_adversarial_network/basic_concept/Unstable training.md
old mode 100644
new mode 100755
index 4606709aa..267dd5ae8
--- a/docs/tutorials/generative_adversarial_network/basic_concept/Unstable training.md
+++ b/docs/tutorials/generative_adversarial_network/basic_concept/Unstable training.md
@@ -23,4 +23,3 @@ GAN训练不稳定的**原因**如下:
7)如果有标签数据,尽量使用标签信息来训练;
8)标签平滑:如果真实图像的标签设置为1,我们将它更改为一个较低的值,比如0.9,避免鉴别器对其分类过于自信 。
-
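以第 8 条"标签平滑"为例,下面的 numpy 草图演示把真实标签从 1 改为 0.9 后,判别器的交叉熵损失不再趋近于零(数值均为假设):

```python
import numpy as np

def smooth_labels(labels, real_value=0.9):
    """单侧标签平滑:把真实样本标签 1 替换为较低的 real_value(如 0.9)。"""
    labels = np.asarray(labels, dtype=np.float64)
    return np.where(labels == 1.0, real_value, labels)

def bce(y, p):
    """二元交叉熵:判别器常用的损失。"""
    return float(np.mean(-(y * np.log(p) + (1 - y) * np.log(1 - p))))

y = np.array([1.0, 1.0, 0.0, 0.0])
p = np.array([0.99, 0.95, 0.05, 0.10])  # 判别器非常"自信"的输出
print(bce(y, p), bce(smooth_labels(y), p))  # 平滑后损失不再趋近 0,避免判别器过于自信
```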
diff --git a/docs/tutorials/generative_adversarial_network/basic_concept/index.rst b/docs/tutorials/generative_adversarial_network/basic_concept/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/generative_adversarial_network/encoder_decoder/Decoder.md b/docs/tutorials/generative_adversarial_network/encoder_decoder/Decoder.md
old mode 100644
new mode 100755
index b996c0cde..37592cf4b
--- a/docs/tutorials/generative_adversarial_network/encoder_decoder/Decoder.md
+++ b/docs/tutorials/generative_adversarial_network/encoder_decoder/Decoder.md
@@ -19,4 +19,3 @@ Decoder对缩小后的特征图像向量进行上采样,然后对上采样后
以人脸编码、解码为例,Encoder对人脸进行编码之后,再用解码器Decoder学习人脸的特性,即由短向量恢复到人脸图像,如下图所示:

-
diff --git a/docs/tutorials/generative_adversarial_network/encoder_decoder/Encoder.md b/docs/tutorials/generative_adversarial_network/encoder_decoder/Encoder.md
old mode 100644
new mode 100755
index 52e8dd18e..e1704e998
--- a/docs/tutorials/generative_adversarial_network/encoder_decoder/Encoder.md
+++ b/docs/tutorials/generative_adversarial_network/encoder_decoder/Encoder.md
@@ -15,4 +15,3 @@ Encoder一般是卷积神经网络,主要由卷积层,池化层和BatchNorma
以人脸编码为例,Encoder将人脸图像压缩到短向量,这样短向量就包含了人脸图像的主要信息,例如该向量的元素可能表示人脸肤色、眉毛位置、眼睛大小等等。编码器学习不同人脸,那么它就能学习到人脸的共性:

-
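把 Encoder 与 Decoder 串联起来,就得到一个最小的自编码器草图(基于 PaddlePaddle 常用 API,假设输入为 28×28 单通道图像,code_dim 等维度为假设值):

```python
import paddle
import paddle.nn as nn

class AutoEncoder(nn.Layer):
    def __init__(self, code_dim=32):
        super().__init__()
        # Encoder:把图像压缩成短向量(编码),短向量包含图像的主要信息
        self.encoder = nn.Sequential(
            nn.Flatten(), nn.Linear(28 * 28, 256), nn.ReLU(),
            nn.Linear(256, code_dim),
        )
        # Decoder:由短向量恢复(解码)出图像
        self.decoder = nn.Sequential(
            nn.Linear(code_dim, 256), nn.ReLU(),
            nn.Linear(256, 28 * 28), nn.Sigmoid(),
        )

    def forward(self, x):
        code = self.encoder(x)                       # 短向量表示
        recon = self.decoder(code).reshape(x.shape)  # 恢复为图像
        return code, recon

ae = AutoEncoder()
x = paddle.rand([4, 1, 28, 28])
code, recon = ae(x)
print(code.shape, recon.shape)  # [4, 32] [4, 1, 28, 28]
```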
diff --git a/docs/tutorials/generative_adversarial_network/encoder_decoder/index.rst b/docs/tutorials/generative_adversarial_network/encoder_decoder/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/generative_adversarial_network/gan_applications/GAN application.md b/docs/tutorials/generative_adversarial_network/gan_applications/GAN application.md
old mode 100644
new mode 100755
index 5179fd149..275ef7d56
--- a/docs/tutorials/generative_adversarial_network/gan_applications/GAN application.md
+++ b/docs/tutorials/generative_adversarial_network/gan_applications/GAN application.md
@@ -7,43 +7,43 @@
图像生成是生成模型的基本问题,GAN相对先前的生成模型能够生成质量更高的图像,如逼真的人脸图像。

-
+
* 超分辨率
将图像放大时,图片会变得模糊。使用GAN将32\*32的图像扩展为64\*64的真实图像,放大图像的同时提升图片的分辨率。

-
+
* 图像修复
将残缺的图像补全、也可以用于去除纹身、电视logo、水印等。

-
+
* 图像到图像的转换
根据一幅图像生成另一幅风格不同的图像,比如马变成斑马、航拍影像变成地图

-
+
* 风景动漫化
将风景图转化为动漫效果

-
+
* 漫画脸
将人脸图生成卡通风格

-
+
* 图像上色
黑白影像上色

-
+
* 文本转图像
根据文字描述生成对应图像
@@ -52,4 +52,3 @@
GAN的应用非常广泛,远远不止上述几种。
-
diff --git a/docs/tutorials/generative_adversarial_network/gan_applications/index.rst b/docs/tutorials/generative_adversarial_network/gan_applications/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/generative_adversarial_network/index.rst b/docs/tutorials/generative_adversarial_network/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/generative_adversarial_network/overview/GANs.md b/docs/tutorials/generative_adversarial_network/overview/GANs.md
old mode 100644
new mode 100755
index 7d262803b..f3d6a543d
--- a/docs/tutorials/generative_adversarial_network/overview/GANs.md
+++ b/docs/tutorials/generative_adversarial_network/overview/GANs.md
@@ -6,15 +6,15 @@
### 1.1 生成模型
- 所谓生成模型,就是指可以描述成一个生成数据的模型,属于一种概率模型。维基百科上对其的定义是:在概率统计理论中, **生成模型**是指能够随机生成观测数据的模型,尤其是在给定某些隐含参数的条件下。它给观测值和标注数据序列指定一个[联合概率分布](https://zh.wikipedia.org/wiki/联合概率分布)。在机器学习中,生成模型可以用来直接对数据建模(例如根据某个变量的概率密度函数进行数据采样),也可以用来建立变量间的[条件概率分布](https://zh.wikipedia.org/wiki/条件概率分布)。条件概率分布可以由生成模型根据[贝叶斯定理](https://zh.wikipedia.org/wiki/贝叶斯定理)形成。通俗的说,通过这个模型我们可以生成不包含在训练数据集中的新的数据。如**图1**所示,比如我们有很多马的图片通过生成模型学习这些马的图像,从中学习到马的样子,生成模型就可以生成看起来很真实的马的图像并却这个图像是不属于训练图像的。
+ 所谓生成模型,就是指可以描述成一个生成数据的模型,属于一种概率模型。维基百科上对其的定义是:在概率统计理论中, **生成模型**是指能够随机生成观测数据的模型,尤其是在给定某些隐含参数的条件下。它给观测值和标注数据序列指定一个[联合概率分布](https://zh.wikipedia.org/wiki/联合概率分布)。在机器学习中,生成模型可以用来直接对数据建模(例如根据某个变量的概率密度函数进行数据采样),也可以用来建立变量间的[条件概率分布](https://zh.wikipedia.org/wiki/条件概率分布)。条件概率分布可以由生成模型根据[贝叶斯定理](https://zh.wikipedia.org/wiki/贝叶斯定理)形成。通俗地说,通过这个模型我们可以生成不包含在训练数据集中的新的数据。如**图1**所示,比如我们有很多马的图片,生成模型通过学习这些图像,从中学到马的样子,就可以生成看起来很真实、但并不属于训练集的马的图像。

图1 生成模型处理流程图
- 而我们常见的模型,一般属于判别模型。如**图2**所示,判别模型可以简单的理解为分类。例如把一副图像分成猫或者狗或者其他,像**图2**中我们训练一个判别模型去辨别是否是梵高的画,这个判别模型会对数据集中的画的特征进行提起和分类,从而区分出哪个是梵高大师所作。
+ 而我们常见的模型,一般属于判别模型。如**图2**所示,判别模型可以简单地理解为分类。例如把一幅图像分成猫或者狗或者其他,像**图2**中我们训练一个判别模型去辨别是否是梵高的画,这个判别模型会对数据集中的画的特征进行提取和分类,从而区分出哪个是梵高大师所作。
- 因此,生成模型与判别模型的区别在于:
+ 因此,生成模型与判别模型的区别在于:
1. 生成模型的数据集是没有和判别模型类似的标签的(即标记信息,生成模型也是可以有标签的,生成模型可以根据标签去生成相应类别的图像),生成模型像是一种非监督学习,而判别模型是一种监督学习。
@@ -24,46 +24,46 @@
生成模型:p(x) 即观测x出现的概率。如果有标签则表示为: p(x|y) 指定标签y生成x的概率。
-
+
图2 判别模型处理流程图
- 而GAN模型的诞生,就是结合了生成模型的特点与判别模型的特点,通过动态对抗的方式进行训练,在同态平衡中寻找最优解。
+ 而GAN模型的诞生,就是结合了生成模型的特点与判别模型的特点,通过动态对抗的方式进行训练,在动态平衡中寻找最优解。
## 2、什么是GAN?
### 2.1 对抗思想
- GAN的主要思想是对抗思想:对抗思想已经成功地应用于许多领域,如机器学习、人工智能、计算机视觉和自然语言处理。最近AlphaGo击败世界顶尖人类玩家的事件引起了公众对人工智能的兴趣。AlphaGo的中间版本使用两个相互竞争的网络。对抗性示例是指与真实示例非常不同,但被非常自信地归入真实类别的示例,或与真实示例略有不同,但被归入错误类别的示例。这是最近一个非常热门的研究课题。
+ GAN的主要思想是对抗思想:对抗思想已经成功地应用于许多领域,如机器学习、人工智能、计算机视觉和自然语言处理。最近AlphaGo击败世界顶尖人类玩家的事件引起了公众对人工智能的兴趣。AlphaGo的中间版本使用两个相互竞争的网络。对抗性示例是指与真实示例非常不同,但被非常自信地归入真实类别的示例,或与真实示例略有不同,但被归入错误类别的示例。这是最近一个非常热门的研究课题。
- 对抗式机器学习是一个极大极小问题。defender构建了我们想要正确工作的分类器,他在参数空间中搜索,以找到尽可能降低分类器成本的参数。同时,攻击者正在搜索模型的输入以使成本最大化。对抗性思想存在于对抗性网络、对抗性学习和对抗性示例中。
+ 对抗式机器学习是一个极大极小问题。defender构建了我们想要正确工作的分类器,他在参数空间中搜索,以找到尽可能降低分类器成本的参数。同时,攻击者正在搜索模型的输入以使成本最大化。对抗性思想存在于对抗性网络、对抗性学习和对抗性示例中。
- 对抗思想的理论背景是博弈论。博弈论,又称为对策论(Game Theory)、赛局理论等,既是现代数学的一个新分支,也是运筹学的一个重要学科。博弈论主要研究公式化了的激励结构间的相互作用,是研究具有斗争或竞争性质现象的数学理论和方法。博弈论考虑游戏中的个体的预测行为和实际行为,并研究它们的优化策略。生物学家使用博弈理论来理解和预测进化论的某些结果。[(博弈论及其相关概念)](https://paddlepedia.readthedocs.io/en/latest/tutorials/generative_adversarial_network/basic_concept/Game%20theory.html)
+ 对抗思想的理论背景是博弈论。博弈论,又称为对策论(Game Theory)、赛局理论等,既是现代数学的一个新分支,也是运筹学的一个重要学科。博弈论主要研究公式化了的激励结构间的相互作用,是研究具有斗争或竞争性质现象的数学理论和方法。博弈论考虑游戏中的个体的预测行为和实际行为,并研究它们的优化策略。生物学家使用博弈理论来理解和预测进化论的某些结果。[(博弈论及其相关概念)](https://paddlepedia.readthedocs.io/en/latest/tutorials/generative_adversarial_network/basic_concept/Game%20theory.html)
-### 2.2 Generative Adversarial Network(GAN)
+### 2.2 Generative Adversarial Network(GAN)
- GAN如其名,是一个生成与对抗并存的神经网络。一般一个GAN网络包括了一个生成器(Generator)和一个判别器(Discriminator)。生成器用来根据要求不断生成越来越接近实际标签的数据,判别器用来不断区分生成器的生成结果和实际标签的区别。例如对于图像超分辨率问题来说,一般神经网络使用损失函数从不同角度(例如像素、特征图等)监督生成图像与真实标签之间的区别,通过优化寻找损失函数的最小值所对应的模型参数。一个GAN网络模型则会通过生成器生成图像,再通过判别器动态的判别生成图像与真实图像的区别。如下图所示,为了具有对比性,左眼展示的是图像原本样子,右眼是通过GAN网络后的样子。很明显, GAN网络将原本模糊的图像变得更加清晰,细节纹理表现的更加突出了。
+ GAN如其名,是一个生成与对抗并存的神经网络。一般一个GAN网络包括了一个生成器(Generator)和一个判别器(Discriminator)。生成器用来根据要求不断生成越来越接近实际标签的数据,判别器用来不断区分生成器的生成结果和实际标签的区别。例如对于图像超分辨率问题来说,一般神经网络使用损失函数从不同角度(例如像素、特征图等)监督生成图像与真实标签之间的区别,通过优化寻找损失函数的最小值所对应的模型参数。一个GAN网络模型则会通过生成器生成图像,再通过判别器动态的判别生成图像与真实图像的区别。如下图所示,为了具有对比性,左眼展示的是图像原本样子,右眼是通过GAN网络后的样子。很明显, GAN网络将原本模糊的图像变得更加清晰,细节纹理表现的更加突出了。

图4 用于图像超分的GAN模型效果示例
- 当然,GAN网络也不仅仅用于图像超分任务中,图像转换,图像理解,图像填补等任务都可以使用GAN。
+ 当然,GAN网络也不仅仅用于图像超分任务中,图像转换,图像理解,图像填补等任务都可以使用GAN。
- 和其他生成算法相比,GANs的提出是为了克服其他生成算法的缺点。对抗式学习背后的基本思想是,生成器试图创建尽可能真实的示例来欺骗鉴别器。鉴别器试图区分假例子和真例子。生成器和鉴别器都通过对抗式学习进行改进。这种对抗性的过程使GANs比其他生成算法具有显著的优势。更具体地说,GANs比其他生成算法具有以下优势:
+ 和其他生成算法相比,GANs的提出是为了克服其他生成算法的缺点。对抗式学习背后的基本思想是,生成器试图创建尽可能真实的示例来欺骗鉴别器。鉴别器试图区分假例子和真例子。生成器和鉴别器都通过对抗式学习进行改进。这种对抗性的过程使GANs比其他生成算法具有显著的优势。更具体地说,GANs比其他生成算法具有以下优势:
- GANs可以并行生成,这对于其他生成算法是不可能的
- 生成器的设计没有限制。
- 人们主观上认为GANs比其他方法能产生更好的例子。
- 下图是一个经典的GAN网络模型。我们先来理解下GAN的两个模型要做什么。首先是判别模型,就是图中右半部分的网络,图中Discriminator部分就是上文到的判别模型,一般使用常见的神经网络结构如VGG、ResNet等作为结构主体。输入一副图像(如$X_{real},X_{fake}$),输出一个概率值,用于判断真假使用(概率值大于0.5为真,小于0.5为假),但真假也不过是人们定义的概率而已。其次是生成模型(Generator部分),生成模型同样也是由经典网络模型为基础构建的,针对不同问题进行卷积层、池化层等的增删修改。Generator的输入为一组随机数Z,输出一个图像。从图中可以看到存在两个数据集,一个是真实数据集,另一个是假的数据集,这个数据集就是有生成网络造出来的数据集。根据这个图我们再来理解一下GAN的目标:
+ 下图是一个经典的GAN网络模型。我们先来理解下GAN的两个模型要做什么。首先是判别模型,就是图中右半部分的网络,图中Discriminator部分就是上文提到的判别模型,一般使用常见的神经网络结构如VGG、ResNet等作为结构主体。输入一幅图像(如$X_{real},X_{fake}$),输出一个概率值,用于判断真假(概率值大于0.5为真,小于0.5为假),但真假也不过是人们定义的概率而已。其次是生成模型(Generator部分),生成模型同样也是由经典网络模型为基础构建的,针对不同问题进行卷积层、池化层等的增删修改。Generator的输入为一组随机数Z,输出一个图像。从图中可以看到存在两个数据集,一个是真实数据集,另一个是假的数据集,这个数据集是由生成网络造出来的数据集。根据这个图我们再来理解一下GAN的目标:
- 判别网络的目的:能判别出一张图是来自真实样本集还是假样本集。假如输入的是真样本,网络输出就接近1;输入的是假样本,网络输出接近0,这就达到了很好判别的目的。
- 生成网络的目的:生成网络是制作样本的,它的目的就是使得自己制作样本的能力尽可能强,能够达到判别网络没法判断该样本是真样本还是假样本。
- GAN网络主要由生成网络与鉴别网络两个部分,隐变量$ z $ (通常为服从高斯分布的随机噪声)通过Generator生成$ X_{fake} $ , 判别器负责判别输入的data是生成的样本$ X_{fake} $ 还是真实样本$ X_{real} $ 。
+ GAN网络主要由生成网络与鉴别网络两个部分组成,隐变量$ z $ (通常为服从高斯分布的随机噪声)通过Generator生成$ X_{fake} $ , 判别器负责判别输入的data是生成的样本$ X_{fake} $ 还是真实样本$ X_{real} $ 。

@@ -79,9 +79,9 @@ $$
$$
- 对于判别器D来说,这是一个二分类问题,V(D,G)为二分类问题中常见的交叉熵损失。对于生成器G来说,为了尽可能欺骗D,所以需要最大化生成样本的判别概率D(G(z)),即最小化 $ \log (1 - D(G(z))) $ (注意:$ \log D(x) $ 一项与生成器G无关,所以可以忽略。)
+ 对于判别器D来说,这是一个二分类问题,V(D,G)为二分类问题中常见的交叉熵损失。对于生成器G来说,为了尽可能欺骗D,所以需要最大化生成样本的判别概率D(G(z)),即最小化 $ \log (1 - D(G(z))) $ (注意:$ \log D(x) $ 一项与生成器G无关,所以可以忽略。)
- 实际训练时,生成器和判别器采取交替训练,即先训练D,然后训练G,不断往复。值得注意的是,对于生成器,其最小化的是$ {\max _D}V(D,G) $ ,即最小化$ V(D,G)$的最大值。为了保证V(D,G)取得最大值,所以我们通常会训练迭代k次判别器,然后再迭代1次生成器(不过在实践当中发现,k通常取1即可)。当生成器G固定时,我们可以对V(D,G)求导,求出最优判别器 $ {D^ * }(x) $:
+ 实际训练时,生成器和判别器采取交替训练,即先训练D,然后训练G,不断往复。值得注意的是,对于生成器,其最小化的是$ {\max _D}V(D,G) $ ,即最小化$ V(D,G)$的最大值。为了保证V(D,G)取得最大值,所以我们通常会训练迭代k次判别器,然后再迭代1次生成器(不过在实践当中发现,k通常取1即可)。当生成器G固定时,我们可以对V(D,G)求导,求出最优判别器 $ {D^ * }(x) $:
$$
@@ -89,15 +89,15 @@ $$
$$
- 把最优判别器代入上述目标函数,可以进一步求出在最优判别器下,生成器的目标函数等价于优化$ {p_{data}}(x),{p_g}(x) $ 的JS散度(JSD, Jenson Shannon Divergence)。可以证明,当G,D二者的capacity足够时,模型会收敛,二者将达到纳什均衡。此时,$ {p_{data}}(x) = {p_g}(x) $ ,判别器不论是对于$ {p_{data}}(x) $ 还是$ {p_g}(x) $ 中采样的样本,其预测概率均为$ \frac{1}{2} $ ,即生成样本与真实样本达到了难以区分的地步。
+ 把最优判别器代入上述目标函数,可以进一步求出在最优判别器下,生成器的目标函数等价于优化$ {p_{data}}(x),{p_g}(x) $ 的JS散度(JSD, Jenson Shannon Divergence)。可以证明,当G,D二者的capacity足够时,模型会收敛,二者将达到纳什均衡。此时,$ {p_{data}}(x) = {p_g}(x) $ ,判别器不论是对于$ {p_{data}}(x) $ 还是$ {p_g}(x) $ 中采样的样本,其预测概率均为$ \frac{1}{2} $ ,即生成样本与真实样本达到了难以区分的地步。
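对"最优判别器"这一结论可以做一个简单的数值验证(numpy 草图,两个一维高斯分布为假设):按公式 $D^*(x) = p_{data}(x)/(p_{data}(x)+p_g(x))$ 直接计算,并验证当 $p_g = p_{data}$ 时 $D^*$ 处处为 $\frac{1}{2}$:

```python
import numpy as np

def gauss_pdf(x, mu, sigma):
    return np.exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 * np.pi))

x = np.linspace(-4, 6, 5)
p_data = gauss_pdf(x, 2.0, 1.0)   # 假设的真实分布 N(2,1)
p_g    = gauss_pdf(x, 0.0, 1.0)   # 假设的生成分布 N(0,1)

# 最优判别器:D*(x) = p_data(x) / (p_data(x) + p_g(x))
d_star = p_data / (p_data + p_g)
print(np.round(d_star, 3))

# 当生成分布与真实分布重合时,D* 处处为 1/2,真假样本无法区分
p_g = p_data
print(np.unique(np.round(p_data / (p_data + p_g), 3)))  # [0.5]
```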
## 3、GAN的发展脉络
- 伴随着信息技术的革新、硬件设备算力的不断更替,人工智能在信息化社会蓬勃发展,以生成模型为代表的机器学习领域,持续受到研究者关注。它被广泛应用于计算机视觉领域,如图像生成、视频生成等任务;以信息隐写 、文本生成等任务为代表的自然语言处理方向;音频领域的语音合成等方向,并且在这些任务中,生成模型均表现出了惊人的效果。目前,GAN在计算机视觉、医学、自然语言处理等领域的研究一直保持着活跃状态。此外,生成对抗网络模型的研究工作主要集中在以下两个方面:一是聚焦于理论线索尝试提高生成对抗网络的稳定性和解决它的训练问题,或考虑不同的角度(如信息论、模型效率等方面)丰富其结构;二是专注于生成对抗网络在不同应用领域内的变体结构和应用场景 。除了图像合成,生成对抗网络还在其他方向成功应用,如图像的超分辨率 、图像描述 、图像修复 、文本到图像的翻译 、语义分割 、目标检测 、生成性对抗攻击 、机器翻译 、图像融合及去噪 。
+ 伴随着信息技术的革新、硬件设备算力的不断更替,人工智能在信息化社会蓬勃发展,以生成模型为代表的机器学习领域,持续受到研究者关注。它被广泛应用于计算机视觉领域,如图像生成、视频生成等任务;以信息隐写 、文本生成等任务为代表的自然语言处理方向;音频领域的语音合成等方向,并且在这些任务中,生成模型均表现出了惊人的效果。目前,GAN在计算机视觉、医学、自然语言处理等领域的研究一直保持着活跃状态。此外,生成对抗网络模型的研究工作主要集中在以下两个方面:一是聚焦于理论线索尝试提高生成对抗网络的稳定性和解决它的训练问题,或考虑不同的角度(如信息论、模型效率等方面)丰富其结构;二是专注于生成对抗网络在不同应用领域内的变体结构和应用场景 。除了图像合成,生成对抗网络还在其他方向成功应用,如图像的超分辨率 、图像描述 、图像修复 、文本到图像的翻译 、语义分割 、目标检测 、生成性对抗攻击 、机器翻译 、图像融合及去噪 。
- 2014年,Ian GoodFellow提出了GAN模型。自GAN提出起,生成对抗网络迅速成为了最火的生成式模型。在快速发展的青春期,GAN产生了许多流行的架构,如DCGAN,StyleGAN,BigGAN,StackGAN,Pix2pix,Age-cGAN,CycleGAN等。这个是生成对抗网络家族图。左边部分主要是改进模型解决实际的图片转换,文本转图像,生成图片,视频转换等实际问题;右边部分则是主要解决GAN框架本身存在的一些问题。传统的生成模型最早要追溯到80年代的RBM,以及后来逐渐使用深度神经网络进行包装的AutoEncoder。然后就是现在称得上最火的生成模型GAN。
+ 2014年,Ian Goodfellow提出了GAN模型。自GAN提出起,生成对抗网络迅速成为了最火的生成式模型。在快速发展的青春期,GAN产生了许多流行的架构,如DCGAN,StyleGAN,BigGAN,StackGAN,Pix2pix,Age-cGAN,CycleGAN等。下图是生成对抗网络家族图。左边部分主要是改进模型解决实际的图片转换,文本转图像,生成图片,视频转换等实际问题;右边部分则是主要解决GAN框架本身存在的一些问题。传统的生成模型最早要追溯到80年代的RBM,以及后来逐渐使用深度神经网络进行包装的AutoEncoder。然后就是现在称得上最火的生成模型GAN。

@@ -111,11 +111,11 @@ $$
**表1**是基于算法的GANs方法的整理,从GANs训练策略、结构变化、训练技巧、监督类型等方面对现有GAN方法进行了分类。本文选取经典模型与方法进行说明。
-### 4.1 GAN的代表性变体
+### 4.1 GAN的代表性变体
#### 4.1.1 InfoGAN
- 它的原理很简单,在info GAN里面,把输入向量z分成两部分,c 和 z'。c可以理解为可解释的隐变量,而z可以理解为不可压缩的噪声。希望通过约束c与output的关系,使得c的维度对应output的语义特征,以手写数字为例,比如笔画粗细,倾斜度等。为了引入c,作者通过互信息的方式来对c进行约束,也可以理解成自编码的过程。具体的操作是,generator的output,经过一个分类器,看是否能够得到c。其实可以看成一个anto-encoder的反过程。其余的discriminator与常规的GAN是一样的。
+ 它的原理很简单,在info GAN里面,把输入向量z分成两部分,c 和 z'。c可以理解为可解释的隐变量,而z'可以理解为不可压缩的噪声。希望通过约束c与output的关系,使得c的维度对应output的语义特征,以手写数字为例,比如笔画粗细,倾斜度等。为了引入c,作者通过互信息的方式来对c进行约束,也可以理解成自编码的过程。具体的操作是,generator的output,经过一个分类器,看是否能够得到c。其实可以看成一个auto-encoder的反过程。其余的discriminator与常规的GAN是一样的。

@@ -123,13 +123,13 @@ $$
- 在实际过程中,classifier和discriminator会共享参数,只有最后一层是不一样的,classifier输出的是一个vector, discriminator输出的是一个标量。
+ 在实际过程中,classifier和discriminator会共享参数,只有最后一层是不一样的,classifier输出的是一个vector, discriminator输出的是一个标量。
从损失函数的角度来看,infoGAN的损失函数变为:
$$
{\min _G}{\max _D}{V_I}(D,G) = V(D,G) - \lambda I(c;G(z,c))
$$
- 相比起原始的GAN,多了一项 $ \lambda I(c;G(z,c)) $,这一项代表的就是c与generator的output的互信息。这一项越大,表示c与output越相关。
+ 相比起原始的GAN,多了一项 $ \lambda I(c;G(z,c)) $,这一项代表的就是c与generator的output的互信息。这一项越大,表示c与output越相关。
为什么info GAN是有效的?直观的理解就是,如果c的每一个维度对Output都有明确的影响,那么classifier就可以根据x返回原来的c。如果c对output没有明显的影响,那么classifier就无法返回原来的c。下面是info GAN的结果。改变categorical变量可以生成不同的数字,改变continuous变量可以改变倾斜度和笔画粗细。
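上式中的互信息项在实现中通常用辅助分类器 Q 的对数似然 $E[\log Q(c|x)]$ 作为变分下界来近似,下面是一个 numpy 示意草图(c 取 10 类 categorical 隐码,Q 的输出用随机分布代替,均为假设):

```python
import numpy as np

rng = np.random.default_rng(0)

n, k = 64, 10
c = rng.integers(0, k, size=n)                 # 采样 categorical 隐码 c
q_probs = rng.dirichlet(np.ones(k), size=n)    # 假想的分类器输出 Q(c|x)

# 互信息项的变分下界:L_I = E[log Q(c|x)],InfoGAN 在 GAN 损失上减去 λ*L_I
L_I = np.mean(np.log(q_probs[np.arange(n), c]))
lam = 1.0
print("互信息下界项 λ*L_I =", round(lam * L_I, 4))
# 训练中最大化 L_I,即让分类器能从输出 x 中恢复隐码 c,使 c 与输出相关
```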
@@ -141,7 +141,7 @@ $$
#### 4.1.2 Conditional GANs (cGANs)
- 如果鉴别器和生成器都依赖于一些额外的信息,则GANs可以扩展为一个条件模型。条件GANs的目标函数是:
+ 如果鉴别器和生成器都依赖于一些额外的信息,则GANs可以扩展为一个条件模型。条件GANs的目标函数是:
$$
@@ -149,11 +149,11 @@ $$
$$
- 我们可以看到InfoGAN的生成器与CGAN的生成器相似。然而,InfoGAN的潜在编码是未知的,它是通过训练发现的。此外,InfoGAN还有一个额外的网络Qto输出条件变量$ Q(c|x) $。
+ 我们可以看到InfoGAN的生成器与CGAN的生成器相似。然而,InfoGAN的潜在编码是未知的,它是通过训练发现的。此外,InfoGAN还有一个额外的网络 Q 来输出条件变量$ Q(c|x) $。
- 基于CGAN,我们可以在类标签、文本、边界框和关键点上生成样本条件。使用堆叠生成对抗网络(SGAN)进行文本到照片真实感图像合成。CGAN已用于卷积人脸生成、人脸老化、图像转换、合成具有特定景物属性的户外图像、自然图像描述和3D感知场景操作。Chrysos等人提出了稳健的CGAN。Kumparampil等人讨论了条件GAN对噪声标签的鲁棒性。条件循环根使用具有循环一致性的CGAN。模式搜索GANs(MSGANs)提出了一个简单而有效的正则化项,用于解决CGAN的模式崩溃问题。
+ 基于CGAN,我们可以以类标签、文本、边界框和关键点为条件生成样本。使用堆叠生成对抗网络(SGAN)进行文本到照片真实感图像合成。CGAN已用于卷积人脸生成、人脸老化、图像转换、合成具有特定景物属性的户外图像、自然图像描述和3D感知场景操作。Chrysos等人提出了稳健的CGAN。Kumparampil等人讨论了条件GAN对噪声标签的鲁棒性。条件CycleGAN使用具有循环一致性的CGAN。模式搜索GANs(MSGANs)提出了一个简单而有效的正则化项,用于解决CGAN的模式崩溃问题。
- 对原始信号源[3]的鉴别器进行训练,使其分配给正确信号源的对数可能性最大化:
+ 对原始信号源[3]的鉴别器进行训练,使其分配给正确信号源的对数可能性最大化:
$$
@@ -161,7 +161,7 @@ L = E[\log P(S = real|{X_{real}})] + E[\log (P(S = fake|{X_{fake}}))]
$$
- 辅助分类器GAN(AC-GAN)的目标函数有两部分:正确源的对数似然数LS和正确类标签的对数似然数LC
+ 辅助分类器GAN(AC-GAN)的目标函数有两部分:正确源的对数似然 $L_S$ 和正确类标签的对数似然 $L_C$。
$$
@@ -175,7 +175,7 @@ $$
- pix2pix的插图:训练条件GANs映射灰度→颜色鉴别器学习在真实灰度、颜色元组和伪(由生成器合成)之间进行分类。与原始GANs不同,发生器和鉴别器都观察输入的灰度图像,pix2pix发生器没有噪声输入。
+ pix2pix的示意:训练条件GANs学习灰度→彩色的映射。判别器学习区分真实的(灰度图,彩色图)元组与伪造的(由生成器合成的)元组。与原始GANs不同,生成器和判别器都观察输入的灰度图像,且pix2pix的生成器没有噪声输入。

@@ -183,7 +183,7 @@ $$
- 整个网络结构如上图所示,其中z为生成网络随机的输入,y为条件,x为真实样本。训练过程仍如GANs,先训练判别器,再训练生成器,交。替进行,直到判别器无法判定真实样本和生成的样本。训练过程中的不同在于,判别器D需要判别三种类型:
+ 整个网络结构如上图所示,其中z为生成网络随机的输入,y为条件,x为真实样本。训练过程仍如GANs,先训练判别器,再训练生成器,交替进行,直到判别器无法判定真实样本和生成的样本。训练过程中的不同在于,判别器D需要判别三种类型:
1. 条件和与条件相符的真实图片,期望输出为1;
@@ -191,7 +191,7 @@ $$
3. 条件和生成网络生成的输出,期望输出为0
- 在cGANs的论文中,进行了MNIST数据集的测试。在这个测试中,加入的条件为每个图片的标签。也就是生成器G的输入为随机向量和需要生成的图片的对应标签。判别器D的输入为真实图片和真实图片对应的标签、以及生成图片。下图为生成的一些图片
+ 在cGANs的论文中,进行了MNIST数据集的测试。在这个测试中,加入的条件为每个图片的标签。也就是生成器G的输入为随机向量和需要生成的图片的对应标签。判别器D的输入为真实图片和真实图片对应的标签、以及生成图片。下图为生成的一些图片

@@ -199,11 +199,11 @@ $$
- 在训练一个GAN时,只把0这个数字的图片作为真实样本放入GAN训练,GAN能生成一个数字的图片(比如0这个数字的图片),而要想生成0-9所有的对应图片,则需要训练10个不同的GAN,但是加入条件,也就是每个图片样本对应的标签的时候,我们就可以把10个数字的样本和对应的标签都同时放到这个网络中,就可以使用一个GAN网络生成0-9这十个数字的图片了
+ 在训练一个GAN时,如果只把数字0的图片作为真实样本放入GAN训练,GAN只能生成0这个数字的图片;要想生成0-9所有数字对应的图片,则需要训练10个不同的GAN。但是加入条件,也就是每个图片样本对应的标签之后,我们就可以把10个数字的样本和对应的标签同时放到这个网络中,用一个GAN网络生成0-9这十个数字的图片了。
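"加入条件"在实现上通常就是把标签的 one-hot 向量拼接进生成器和判别器的输入,numpy 示意如下(维度均为假设值):

```python
import numpy as np

rng = np.random.default_rng(0)

def one_hot(y, num_classes=10):
    out = np.zeros((len(y), num_classes))
    out[np.arange(len(y)), y] = 1.0
    return out

z = rng.normal(size=(4, 100))        # 随机噪声
y = np.array([0, 3, 7, 9])           # 想要生成的数字标签(条件)

g_input = np.concatenate([z, one_hot(y)], axis=1)   # 生成器输入:[z, y]
print(g_input.shape)  # (4, 110):同一个生成器即可按条件生成 0-9

# 判别器同样把条件拼进输入:[x, y](x 为展平的图片)
x = rng.normal(size=(4, 784))
d_input = np.concatenate([x, one_hot(y)], axis=1)
print(d_input.shape)  # (4, 794)
```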
#### 4.1.3 CycleGAN
- CycleGAN本质上是两个镜像对称的GAN,构成了一个环形网络。两个GAN共享两个生成器,并各自带一个判别器,即共有两个判别器和两个生成器。一个单向GAN两个loss,两个即共四个loss。
+ CycleGAN本质上是两个镜像对称的GAN,构成了一个环形网络。两个GAN共享两个生成器,并各自带一个判别器,即共有两个判别器和两个生成器。每个单向GAN有两个loss,两个GAN共四个loss。

@@ -229,21 +229,21 @@ CycleGAN的网络架构如图所示:
- 可以实现无配对的两个图片集的训练是CycleGAN与Pixel2Pixel相比的一个典型优点。但是我们仍然需要通过训练创建这个映射来确保输入图像和生成图像间存在有意义的关联,即输入输出共享一些特征。
+ 可以实现无配对的两个图片集的训练是CycleGAN与Pix2Pix相比的一个典型优点。但是我们仍然需要通过训练创建这个映射来确保输入图像和生成图像间存在有意义的关联,即输入输出共享一些特征。
- 简而言之,该模型通过从域DA获取输入图像,该输入图像被传递到第一个生成器GeneratorA→B,其任务是将来自域DA的给定图像转换到目标域DB中的图像。然后这个新生成的图像被传递到另一个生成器GeneratorB→A,其任务是在原始域DA转换回图像CyclicA,这里可与自动编码器作对比。这个输出图像必须与原始输入图像相似,用来定义非配对数据集中原来不存在的有意义映射。
+ 简而言之,该模型从域DA获取输入图像,将其传递到第一个生成器GeneratorA→B,其任务是将来自域DA的给定图像转换为目标域DB中的图像。然后这个新生成的图像被传递到另一个生成器GeneratorB→A,其任务是将其转换回原始域DA中的图像CyclicA,这里可与自动编码器作对比。这个输出图像必须与原始输入图像相似,用来定义非配对数据集中原来不存在的有意义映射。
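上述"转换回去要与原图相似"对应循环一致性损失,numpy 草图如下(两个生成器用互逆的占位函数代替,仅演示损失的构成,均为假设):

```python
import numpy as np

rng = np.random.default_rng(0)

# 占位"生成器":实际中 G_ab: A->B 与 G_ba: B->A 是两个神经网络(此处为假设)
G_ab = lambda x: x + 0.1
G_ba = lambda x: x - 0.1

def cycle_loss(x_a, x_b):
    """循环一致性:A->B->A 与 B->A->B 都应回到原图(L1 距离)。"""
    loss_a = np.abs(G_ba(G_ab(x_a)) - x_a).mean()
    loss_b = np.abs(G_ab(G_ba(x_b)) - x_b).mean()
    return loss_a + loss_b

x_a = rng.random((4, 3, 8, 8))   # 域 A 的图像(如马)
x_b = rng.random((4, 3, 8, 8))   # 域 B 的图像(如斑马)
print(cycle_loss(x_a, x_b))      # 约为 0:这对占位生成器近似互逆
# 总目标 = 两个方向的对抗损失(共四个 loss)+ λ * cycle_loss
```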
-### 4.2 GANs的训练策略
+### 4.2 GANs的训练策略
- 尽管理论上存在唯一的解决方案,但由于多种原因,GANs训练很困难,而且往往不稳定。一个困难是,GANs的最优权重对应于损失函数的鞍点,而不是极小值。具体模型训练可以参考[这里](https://paddlepedia.readthedocs.io/en/latest/tutorials/generative_adversarial_network/basic_concept/GAN%20train.html)。
+ 尽管理论上存在唯一的解决方案,但由于多种原因,GANs训练很困难,而且往往不稳定。一个困难是,GANs的最优权重对应于损失函数的鞍点,而不是极小值。具体模型训练可以参考[这里](https://paddlepedia.readthedocs.io/en/latest/tutorials/generative_adversarial_network/basic_concept/GAN%20train.html)。
- 有许多关于GANs训练的论文。Yadav等人用预测方法稳定了GANs。通过使用独立学习率,为鉴别器和生成器提出了两个时间尺度更新规则(TTUR),以确保模型能够收敛到稳定的局部纳什均衡。Arjovsky为充分理解GANs的训练做了很多理论上的研究,分析了GANs难以训练的原因,严格研究论证了训练中出现的饱和、不稳定等问题,研究了缓解这些问题的实际和理论基础方向,并引入了新的研究工具。Liang等人认为GANs训练是一个持续的学习问题。改进GANs训练的一种方法是评估训练中可能出现的经验性“症状”。这些症状包括:生成模型崩溃,为不同的输入生成非常相似的样本;鉴别器损耗迅速收敛到零,不向发生器提供梯度更新;模型收敛困难。
+ 有许多关于GANs训练的论文。Yadav等人用预测方法稳定了GANs。通过使用独立学习率,为判别器和生成器提出了两个时间尺度更新规则(TTUR),以确保模型能够收敛到稳定的局部纳什均衡。Arjovsky为充分理解GANs的训练做了很多理论上的研究,分析了GANs难以训练的原因,严格研究论证了训练中出现的饱和、不稳定等问题,研究了缓解这些问题的实际和理论基础方向,并引入了新的研究工具。Liang等人认为GANs训练是一个持续的学习问题。改进GANs训练的一种方法是评估训练中可能出现的经验性"症状"。这些症状包括:生成模型崩溃,为不同的输入生成非常相似的样本;判别器损失迅速收敛到零,无法向生成器提供梯度更新;模型收敛困难。
#### 4.2.1 基于输入输出改进的GAN模型
- 基于输入输出的改进主要是指从 G 的输入端和 D 的输出端进行改进。在 GAN 的基本模型中, G 的输入为隐空间上的随机变量,因此对其改进主要从隐空间与隐变量这两点展开。改进隐变量的目的是使其更好地控制生成样本的细节,而改进隐空间则是为了更好地区分不同的生成模式。 D 输出的判别结果是真假二分类,可以配合目标函数将其调整为多分类或去除神经网络的 Softmax 层直接输出特征向量,进而优化训练过程、实现半监督学习等效果。
+ 基于输入输出的改进主要是指从 G 的输入端和 D 的输出端进行改进。在 GAN 的基本模型中, G 的输入为隐空间上的随机变量,因此对其改进主要从隐空间与隐变量这两点展开。改进隐变量的目的是使其更好地控制生成样本的细节,而改进隐空间则是为了更好地区分不同的生成模式。 D 输出的判别结果是真假二分类,可以配合目标函数将其调整为多分类或去除神经网络的 Softmax 层直接输出特征向量,进而优化训练过程、实现半监督学习等效果。
- BiCoGAN 模型的提出者认为MIRZA提出的模型的输入 z 与 c 相互纠缠,因此增加了一个编码器(记为 E )用于学习从判别器输出到生成器两个输入的逆映射,从而更精确地编码 c ,以提升模型表现。如**图14**所示,将 z 与 c 的拼接(记为ˆ z)输入生成器得到输出G(ˆ z) ,将真实样本 x 输入编码器 得 到 输 出 E(x) , 判 别 器 接 收G [(ˆz) ,ˆz] 或[x, E(x)], 作为输入,判定该输入来自生成器或为真实数据的某一类。由于真实样本 x 具有的标签可视为 c ,而 E(x)又可以被拆分为 z'与 c',因此使 c 与 c'尽可能接近,也成为模型训练的目标,从而使编码器学习逆映射。文中提出使用 EFL(extrinsic factor loss)衡量两个分布 p c 与 p c' 的距离,并提出如式(6)所示的目标函数。
+ BiCoGAN 模型的提出者认为MIRZA提出的模型的输入 z 与 c 相互纠缠,因此增加了一个编码器(记为 E )用于学习从判别器输出到生成器两个输入的逆映射,从而更精确地编码 c ,以提升模型表现。如**图14**所示,将 z 与 c 的拼接(记为 ẑ)输入生成器得到输出 G(ẑ),将真实样本 x 输入编码器得到输出 E(x),判别器接收 [G(ẑ), ẑ] 或 [x, E(x)] 作为输入,判定该输入来自生成器或为真实数据的某一类。由于真实样本 x 具有的标签可视为 c ,而 E(x)又可以被拆分为 z'与 c',因此使 c 与 c'尽可能接近,也成为模型训练的目标,从而使编码器学习逆映射。文中提出使用 EFL(extrinsic factor loss)衡量两个分布 p_c 与 p_c' 的距离,并提出如式(6)所示的目标函数。

@@ -251,7 +251,7 @@ CycleGAN的网络架构如图所示:
- IcGAN (invertible conditional GAN)以MIRZA的模型为基础,增加了两个预训练的编码器 E z 和E y ,E z 用于生成隐空间中的随机变量 z,E y 用于生成原始条件 y,通过将 y 修改成 y'作为 cGAN的输入条件,从而控制合成图像的细节(如**图15**所示)。文章提出了 3 种从分布中进行采样获得y'的方法:当 y 为二进制向量时,可通过 KDE(kernel denisity estimation)拟合分布并进行采样;当 y 为实向量时,可选取训练集的标签向量进行直接插值;当某个条件并不是在所有训练集中表现唯一时,可直接对 p data 进行采样。
+ IcGAN (invertible conditional GAN)以MIRZA的模型为基础,增加了两个预训练的编码器 E_z 和 E_y,E_z 用于生成隐空间中的随机变量 z,E_y 用于生成原始条件 y,通过将 y 修改成 y'作为 cGAN的输入条件,从而控制合成图像的细节(如**图15**所示)。文章提出了 3 种从分布中进行采样获得y'的方法:当 y 为二进制向量时,可通过 KDE(kernel density estimation)拟合分布并进行采样;当 y 为实向量时,可选取训练集的标签向量进行直接插值;当某个条件并不是在所有训练集中表现唯一时,可直接对 p_data 进行采样。

@@ -259,7 +259,7 @@ CycleGAN的网络架构如图所示:
- DeLiGAN 适用于训练数据规模小、种类多的场景,DeliGAN 模型如**图16**所示。Gurumurthy等提出使用 GMM(Gaussian mixture model)对隐空间进行参数化,再随机选择一个高斯分量进行重参数化,从指定的高斯分布中获取样本,但模型使用 GMM 是一种简化假设,限制了其逼近更复杂分布的能力。
+ DeLiGAN 适用于训练数据规模小、种类多的场景,DeLiGAN 模型如**图16**所示。Gurumurthy等提出使用 GMM(Gaussian mixture model)对隐空间进行参数化,再随机选择一个高斯分量进行重参数化,从指定的高斯分布中获取样本,但模型使用 GMM 是一种简化假设,限制了其逼近更复杂分布的能力。

@@ -267,11 +267,11 @@ CycleGAN的网络架构如图所示:
- NEMGAN(noise engineered mode matchingGAN)的提出者提出一种能够在训练集存在数据不均衡情况下表现较好的模式匹配策略,根据生成样本训练出其在隐空间中的对应表示,得到潜在模式的先验分布,从而将生成样本的多种模式进行分离,并且与真实样本的模式进行匹配,保证了生成样本中包含多个真实样本的模式,以缓解模式崩溃问题。
+ NEMGAN(noise engineered mode matchingGAN)的提出者提出一种能够在训练集存在数据不均衡情况下表现较好的模式匹配策略,根据生成样本训练出其在隐空间中的对应表示,得到潜在模式的先验分布,从而将生成样本的多种模式进行分离,并且与真实样本的模式进行匹配,保证了生成样本中包含多个真实样本的模式,以缓解模式崩溃问题。
- FCGAN(fully conditional GAN)以MIRZA的模型为基础,将额外信息 c 连接到神经网络的每一层,一定限度上提升了有条件生成样本时的生成样本质量,但该模型在 c 较为复杂或大向量的场景中运算效率低。
+ FCGAN(fully conditional GAN)以MIRZA的模型为基础,将额外信息 c 连接到神经网络的每一层,一定限度上提升了有条件生成样本时的生成样本质量,但该模型在 c 较为复杂或大向量的场景中运算效率低。
- SGAN(semi-supervised learning GAN) 是一种能够为数据集重建标签信息的半监督模型,其模型如**图17** 所示。它将 D 改进为分类器与判别器的结合体,D 的输出包含 N 类真实样本和一类生成样本,共有 N+1 类。向模型输入无标签的样本且判别器将其分类为真实样本时,可以将判别器的输出作为该样本的标签。
+ SGAN(semi-supervised learning GAN) 是一种能够为数据集重建标签信息的半监督模型,其模型如**图17** 所示。它将 D 改进为分类器与判别器的结合体,D 的输出包含 N 类真实样本和一类生成样本,共有 N+1 类。向模型输入无标签的样本且判别器将其分类为真实样本时,可以将判别器的输出作为该样本的标签。

@@ -279,15 +279,15 @@ CycleGAN的网络架构如图所示:
- AC-GAN(auxiliary classifier GAN)同时具备MIRZA的模型和ODENA的模型的特点,G 输入随机变量与分类信息 c,D 输出样本为假和分类概率,该方法能够在有条件生成样本时输出生成样本所属的类别。
+ AC-GAN(auxiliary classifier GAN)同时具备MIRZA的模型和ODENA的模型的特点,G 输入随机变量与分类信息 c,D 输出样本真假的判别结果和分类概率,该方法能够在有条件生成样本时输出生成样本所属的类别。
#### 4.2.2 基于生成器改进的 GAN 模型
- 基于生成器进行改进的工作,旨在提高生成样本质量与避免模式崩溃问题,使模型能够生成多种类的样本,且同一种类内的样本具有多样性。改进的思路包括:使用集成学习(ensemble learning)的思想综合多个弱生成器所学习到的模式、设计每个生成器专注于学习特定模式多生成器架构,从而使模型整体包含多个模式,使用多智能体系统的思想使多个生成器之间产生竞争与合作的关系等。
+ 基于生成器进行改进的工作,旨在提高生成样本质量与避免模式崩溃问题,使模型能够生成多种类的样本,且同一种类内的样本具有多样性。改进的思路包括:使用集成学习(ensemble learning)的思想综合多个弱生成器所学习到的模式、设计每个生成器专注于学习特定模式多生成器架构,从而使模型整体包含多个模式,使用多智能体系统的思想使多个生成器之间产生竞争与合作的关系等。
- AdaGAN模型的提出者提出了一种融入集成学习思想的迭代训练算法。在单步迭代过程中,根据训练样本与混合权值得到一个弱生成器,该弱生成器与上一轮迭代得到的弱生成器加权混合,得到本次迭代结果。若干轮迭代以后,生成器综合了多个弱生成器各自学习到的模式,缓解了模式缺失导致的模式崩溃问题,并能够生成出质量较好的样本。但是,混合多个生成器网络导致输入的隐空间不连续,不能像基本 GAN 模型那样通过插值法得到新的隐变量。
+ AdaGAN模型的提出者提出了一种融入集成学习思想的迭代训练算法。在单步迭代过程中,根据训练样本与混合权值得到一个弱生成器,该弱生成器与上一轮迭代得到的弱生成器加权混合,得到本次迭代结果。若干轮迭代以后,生成器综合了多个弱生成器各自学习到的模式,缓解了模式缺失导致的模式崩溃问题,并能够生成出质量较好的样本。但是,混合多个生成器网络导致输入的隐空间不连续,不能像基本 GAN 模型那样通过插值法得到新的隐变量。
- MADGAN(multi-agent diverse GAN)由多个生成器和一个判别器组成,其模型如图16所示。其中,判别器负责判断输入样本是真实样本还是生成样本,若为生成样本则判断它是由哪一个生成器所生成的。每个生成器专注于学习特定模式,模型使多个生成器各自学习,模型最终得到的生成样本来自多个学习到不同模式的生成器,显式地保证了生成样本的多样性,缓解了模式崩溃问题。
+ MADGAN(multi-agent diverse GAN)由多个生成器和一个判别器组成,其模型如图16所示。其中,判别器负责判断输入样本是真实样本还是生成样本,若为生成样本则判断它是由哪一个生成器所生成的。每个生成器专注于学习特定模式,模型使多个生成器各自学习,模型最终得到的生成样本来自多个学习到不同模式的生成器,显式地保证了生成样本的多样性,缓解了模式崩溃问题。

@@ -295,7 +295,7 @@ CycleGAN的网络架构如图所示:
- MGAN 缓解模式崩溃问题的思路与HOANG等人的思路类似,其模型如**图9**所示。该模型设计了一种与判别器权值共享但去除掉 Softmax 层的分类器,用于承担判断生成样本所属生成器的功能,判别器仅负责判别样本为真实样本还是生成样本。
+ MGAN 缓解模式崩溃问题的思路与HOANG等人的思路类似,其模型如**图9**所示。该模型设计了一种与判别器权值共享但去除掉 Softmax 层的分类器,用于承担判断生成样本所属生成器的功能,判别器仅负责判别样本为真实样本还是生成样本。

@@ -303,7 +303,7 @@ CycleGAN的网络架构如图所示:
- MPMGAN(message passing multi-agent GAN)模型是一种引入消息传递机制的多生成器,生成器输出作为传递给其他生成器的消息。在消息共享机制的作用下,所有生成器都有合作目标、竞争目标两种目标。合作目标鼓励其他生成器的生成样本优于自身的生成样本;竞争目标促使自身的生成样本优于其他生成器的生成样本。两种目标共同作用使生成样本质量得以优化。
+ MPMGAN(message passing multi-agent GAN)模型是一种引入消息传递机制的多生成器,生成器输出作为传递给其他生成器的消息。在消息共享机制的作用下,所有生成器都有合作目标、竞争目标两种目标。合作目标鼓励其他生成器的生成样本优于自身的生成样本;竞争目标促使自身的生成样本优于其他生成器的生成样本。两种目标共同作用使生成样本质量得以优化。

@@ -313,9 +313,9 @@ CycleGAN的网络架构如图所示:
#### 4.2.3 基于判别器改进的GAN模型
- GAN 模型训练过程中,最初的生成样本质量较差,判别器可以简单地区分样本,这导致生成器初始训练速度慢。改进判别器,使其符合生成器当前能力有助于加快训练,使其识别多种模式可以缓解模式崩溃问题。改进思路包括使单一判别器能识别出更多模式,以及使多个判别器中的每个判别器专注于识别特定模式等。
+ GAN 模型训练过程中,最初的生成样本质量较差,判别器可以简单地区分样本,这导致生成器初始训练速度慢。改进判别器,使其符合生成器当前能力有助于加快训练,使其识别多种模式可以缓解模式崩溃问题。改进思路包括使单一判别器能识别出更多模式,以及使多个判别器中的每个判别器专注于识别特定模式等。
- PacGAN 模型如**图21**所示。PacGAN 将同一类的多个样本“打包”后一起输入判别器,以此保证每次判别器输入的样本都具有多样性。由于判别器每次接受输入时都能感知到样本的多样性,生成器试图欺骗判别器时,需要保证生成样本的多样性,这有助于缓解模式崩溃问题。
+ PacGAN 模型如**图21**所示。PacGAN 将同一类的多个样本“打包”后一起输入判别器,以此保证每次判别器输入的样本都具有多样性。由于判别器每次接受输入时都能感知到样本的多样性,生成器试图欺骗判别器时,需要保证生成样本的多样性,这有助于缓解模式崩溃问题。

@@ -323,7 +323,7 @@ CycleGAN的网络架构如图所示:
- GMAN ( generative multi-adversarial net-works)模型的提出者认为过度改进判别器会使目标函数过于苛刻,反而抑制生成器学习,因此提出一种结合集成学习的方法,通过设置多个判别器,生成器从多判别器聚合结果中学习,从而使网络加速收敛。GMAN 模型如**图22**所示。
+ GMAN(generative multi-adversarial networks)模型的提出者认为过度改进判别器会使目标函数过于苛刻,反而抑制生成器学习,因此提出一种结合集成学习的方法,通过设置多个判别器,生成器从多判别器聚合结果中学习,从而使网络加速收敛。GMAN 模型如**图22**所示。

@@ -331,7 +331,7 @@ CycleGAN的网络架构如图所示:
- DropoutGAN设置了一组判别器,在每批样本训练结束时,以一定概率删除该结果,将剩余结果聚合后反馈到生成器,以此使生成器不局限于欺骗特定判别器。DropoutGAN 模型的提出者认为模式崩溃问题是生成器对特定判别器或静态集成判别器的过度拟合,即生成器学习到了使判别器输出真值的特殊条件而非学习到了样本模式,而该模型的结构中,判别器集合是动态变化的,生成器无法学习到欺骗判别器的特殊条件,从而使生成器学习多种样本模式,有助于缓解模式崩溃问题。DropoutGAN 模型如**图23**所示。
+ DropoutGAN设置了一组判别器,在每批样本训练结束时,以一定概率删除该结果,将剩余结果聚合后反馈到生成器,以此使生成器不局限于欺骗特定判别器。DropoutGAN 模型的提出者认为模式崩溃问题是生成器对特定判别器或静态集成判别器的过度拟合,即生成器学习到了使判别器输出真值的特殊条件而非学习到了样本模式,而该模型的结构中,判别器集合是动态变化的,生成器无法学习到欺骗判别器的特殊条件,从而使生成器学习多种样本模式,有助于缓解模式崩溃问题。DropoutGAN 模型如**图23**所示。

@@ -339,7 +339,7 @@ CycleGAN的网络架构如图所示:
- D2GAN(dual discriminator GAN)设置了两个判别器 D 1 、D 2 ,分别使用正向 KL 散度及逆向 KL 散度,以充分利用二者互补的统计特性。其中 D 1 通过正确判定样本来自真实样本分布获得奖励,D 2 则通过正确判定样本来自生成样本分布获得奖励。生成器同时欺骗两个判别器,以此来提升生成样本的质量。D2GAN 模型如**图24**所示。
+ D2GAN(dual discriminator GAN)设置了两个判别器 D_1、D_2,分别使用正向 KL 散度及逆向 KL 散度,以充分利用二者互补的统计特性。其中 D_1 通过正确判定样本来自真实样本分布获得奖励,D_2 则通过正确判定样本来自生成样本分布获得奖励。生成器同时欺骗两个判别器,以此来提升生成样本的质量。D2GAN 模型如**图24**所示。

@@ -347,9 +347,9 @@ CycleGAN的网络架构如图所示:
- StabilizingGAN 模型的提出者认为真实样本在空间中集中分布,而生成样本初始时在空间中分散分布,导致训练初期判别器能够准确判断出几乎所有生成样本,产生无效梯度,使生成器训练缓慢。因此,他们提出同时训练一组视角受限的判别器,每个判别器都专注于空间中的一部分投影,生成器逐渐满足所有判别器的限制,以此稳定训练,提升生成样本质量。
+ StabilizingGAN 模型的提出者认为真实样本在空间中集中分布,而生成样本初始时在空间中分散分布,导致训练初期判别器能够准确判断出几乎所有生成样本,产生无效梯度,使生成器训练缓慢。因此,他们提出同时训练一组视角受限的判别器,每个判别器都专注于空间中的一部分投影,生成器逐渐满足所有判别器的限制,以此稳定训练,提升生成样本质量。
- 在 EBGAN(energy-based GAN)模型(如**图25**所示)中引入了能量函数的方法,事物间差异越大能量越高,故而真实分布附近样本具有较低能量。其研究者设计了一个由编码器和解码器构成的判别器,使用 MSE(mean square error)衡量生成样本与真实样本的差异并作为能量函数,生成器目标为生成最小化能量的生成样本。BEGAN(boundary equilibrium GAN)使用自编码器替代ZHAO等人提出来的模型中的判别器。
+ 在 EBGAN(energy-based GAN)模型(如**图25**所示)中引入了能量函数的方法,事物间差异越大能量越高,故而真实分布附近样本具有较低能量。其研究者设计了一个由编码器和解码器构成的判别器,使用 MSE(mean square error)衡量生成样本与真实样本的差异并作为能量函数,生成器目标为生成最小化能量的生成样本。BEGAN(boundary equilibrium GAN)使用自编码器替代ZHAO等人提出来的模型中的判别器。

@@ -359,11 +359,11 @@ CycleGAN的网络架构如图所示:
#### 4.2.4 基于多模块组合改进的 GAN 模型
- 除了更好地拟合真实样本分布之外,提升网络收敛的速度、提高生成图片的清晰度、将其应用在半监督学习上等同样是 GAN 模型改进的方向。这类研究工作通过调整模块结构,对不同的影响因素加以优化处理,使模型达到特定目的。
+ 除了更好地拟合真实样本分布之外,提升网络收敛的速度、提高生成图片的清晰度、将其应用在半监督学习上等同样是 GAN 模型改进的方向。这类研究工作通过调整模块结构,对不同的影响因素加以优化处理,使模型达到特定目的。
- GRAN( generative recurrent adversarialnetworks)是一种递归生成模型,它反复生成以上一状态为条件的输出,最终得到更符合人类直觉的生成样本。
+ GRAN(generative recurrent adversarial networks)是一种递归生成模型,它反复生成以上一状态为条件的输出,最终得到更符合人类直觉的生成样本。
- StackGAN 以MIRZA的模型为基础构建了一种两阶段模型(如**图26**所示)。它将文本描述作为额外信息,阶段一生成较低分辨率的图像并输出至阶段二,阶段二输出较高分辨率的图像,从而提高生成图像的分辨率。
+ StackGAN 以MIRZA的模型为基础构建了一种两阶段模型(如**图26**所示)。它将文本描述作为额外信息,阶段一生成较低分辨率的图像并输出至阶段二,阶段二输出较高分辨率的图像,从而提高生成图像的分辨率。

@@ -371,9 +371,9 @@ CycleGAN的网络架构如图所示:
- ProgressGAN 模型的提出者认为小尺度图像能够保证多样性且细节不丢失,他们使用多个且逐渐增大的 WGAN-GP 网络,逐步训练最终生成高清图像。
+ ProgressGAN 模型的提出者认为小尺度图像能够保证多样性且细节不丢失,他们使用多个且逐渐增大的 WGAN-GP 网络,逐步训练最终生成高清图像。
- TripleGAN 通过增加一个分类器网络为真实样本生成标签,生成器为真实标签生成样本,判别器判别接收的样本标签对是否为有真实标签的真实样本,从而同时训练出效果较好的分类器和生成器,将 GAN 的能力扩展到可以为无标签样本打标签。TripleGAN 模型如**图 27**所示。
+ TripleGAN 通过增加一个分类器网络为真实样本生成标签,生成器为真实标签生成样本,判别器判别接收的样本标签对是否为有真实标签的真实样本,从而同时训练出效果较好的分类器和生成器,将 GAN 的能力扩展到可以为无标签样本打标签。TripleGAN 模型如**图 27**所示。

@@ -381,7 +381,7 @@ CycleGAN的网络架构如图所示:
- ControlGAN 模型的提出者认为MIRZA的模型中的判别器同时承担了真实样本分类与判别真假样本两个任务,因此将其拆分为独立的分类器和判别器,从而在有条件生成样本时更细粒度地控制生成样本的特征。ControlGAN 模型如**图28**所示。
+ ControlGAN 模型的提出者认为MIRZA的模型中的判别器同时承担了真实样本分类与判别真假样本两个任务,因此将其拆分为独立的分类器和判别器,从而在有条件生成样本时更细粒度地控制生成样本的特征。ControlGAN 模型如**图28**所示。

@@ -389,7 +389,7 @@ CycleGAN的网络架构如图所示:
- SGAN(several local pairs GAN)使用若干组局部网络对和一组全局网络对,每组网络对有一个生成器与一个判别器。局部网络对使用固定的配对网络进行训练,不同局部网络对之间没有信息交互,全局网络利用局部网络进行训练。由于每一个局部网络对都可以学到一种模式,在使用局部网络对更新全局网络对后,能够保证全局网络对综合了多种模式,从而缓解模式崩溃问题。SGAN 模型如**图29**所示。
+ SGAN(several local pairs GAN)使用若干组局部网络对和一组全局网络对,每组网络对有一个生成器与一个判别器。局部网络对使用固定的配对网络进行训练,不同局部网络对之间没有信息交互,全局网络利用局部网络进行训练。由于每一个局部网络对都可以学到一种模式,在使用局部网络对更新全局网络对后,能够保证全局网络对综合了多种模式,从而缓解模式崩溃问题。SGAN 模型如**图29**所示。

@@ -397,13 +397,13 @@ CycleGAN的网络架构如图所示:
- MemoryGAN 模型的提出者认为隐空间具有连续的分布,但不同种类的结构却具有不连续性,因此在网络中加入存储网络供生成器和判别器访问,使生成器和判别器学习数据的聚类分布以优化该问题。
+ MemoryGAN 模型的提出者认为隐空间具有连续的分布,但不同种类的结构却具有不连续性,因此在网络中加入存储网络供生成器和判别器访问,使生成器和判别器学习数据的聚类分布以优化该问题。
#### 4.2.5 基于模型交叉思想改进的GAN模型
- 结合其他生成模型思想及其他领域思想对GAN 模型进行改进,同样可以起到优化模型表现或拓展模型应用场景的效果。
+ 结合其他生成模型思想及其他领域思想对GAN 模型进行改进,同样可以起到优化模型表现或拓展模型应用场景的效果。
- DCGAN 使 用 去 除 池 化 层 的 CNN(convolutional neural network)替代基本 GAN 模型中的多层感知机(如**图 30**所示),并使用全局池化层替代全连接层以减少计算量,以提高生成样本的质量,优化训练不稳定的问题。
+ DCGAN 使用去除池化层的 CNN(convolutional neural network)替代基本 GAN 模型中的多层感知机(如**图 30**所示),并使用全局池化层替代全连接层以减少计算量,以提高生成样本的质量,优化训练不稳定的问题。

@@ -411,7 +411,7 @@ CycleGAN的网络架构如图所示:
- CapsuleGAN 使用胶囊网络作为判别器的框架(如**图31**所示)。胶囊网络可以用于替代神经元,将节点输出由一个值转变为一个向量,神经元用于检测某个特定模式,而胶囊网络可以检测某个种类的模式,以此提高判别器的泛化能力,从而提高生成样本质量。
+ CapsuleGAN 使用胶囊网络作为判别器的框架(如**图31**所示)。胶囊网络可以用于替代神经元,将节点输出由一个值转变为一个向量,神经元用于检测某个特定模式,而胶囊网络可以检测某个种类的模式,以此提高判别器的泛化能力,从而提高生成样本质量。

@@ -419,13 +419,13 @@ CycleGAN的网络架构如图所示:
- VAEGAN 利用GAN来提高VAE生成样本的质量。其观点是:在 VAE 中,编码器将真实分布编码到隐空间,而解码器将隐空间恢复为真实分布。单独解码器即可用作生成模型,但生成样本质量较差,因此再将其输入判别器中。
+ VAEGAN 利用GAN来提高VAE生成样本的质量。其观点是:在 VAE 中,编码器将真实分布编码到隐空间,而解码器将隐空间恢复为真实分布。单独解码器即可用作生成模型,但生成样本质量较差,因此再将其输入判别器中。
- DEGAN(decoder-encoder GAN)模型的提出者认为输入的随机变量服从高斯分布,因此生成器需将整个高斯分布映射到图像,无法反映真实样本分布。因此借鉴 VAE 的思想,在 GAN中加入预训练的编码器与解码器,将随机变量映射为含有真实样本分布信息的变量,再传递给GAN,从而加速收敛并提高生成质量。
+ DEGAN(decoder-encoder GAN)模型的提出者认为输入的随机变量服从高斯分布,因此生成器需将整个高斯分布映射到图像,无法反映真实样本分布。因此借鉴 VAE 的思想,在 GAN中加入预训练的编码器与解码器,将随机变量映射为含有真实样本分布信息的变量,再传递给GAN,从而加速收敛并提高生成质量。
- AAE(adversarial auto-encoder)通过在 AE(auto-encoder)的隐藏层中增加对抗的思想来结合 AE 与 GAN。判别器通过判断数据是来自隐藏层还是真实样本,使编码器的分布向真实样本分布靠近。
+ AAE(adversarial auto-encoder)通过在 AE(auto-encoder)的隐藏层中增加对抗的思想来结合 AE 与 GAN。判别器通过判断数据是来自隐藏层还是真实样本,使编码器的分布向真实样本分布靠近。
- BiGAN 使用编码器来提取真实样本特征,使用解码器来模仿生成器,并使用判别器来辨别特征样本对来自编码器还是解码器,最终使编码方式和解码方式趋近于互逆,从而使随机变量与真实数据形成映射。ALi和 BiGAN 本质相同,二者仅有细微区别。BiGAN 模型如**图32**所示。
+ BiGAN 使用编码器来提取真实样本特征,使用解码器来模仿生成器,并使用判别器来辨别特征样本对来自编码器还是解码器,最终使编码方式和解码方式趋近于互逆,从而使随机变量与真实数据形成映射。ALi和 BiGAN 本质相同,二者仅有细微区别。BiGAN 模型如**图32**所示。

@@ -433,13 +433,13 @@ CycleGAN的网络架构如图所示:
- MatAN(matching adversarial network)使用孪生网络替换判别器,以将正确标签考虑在生成器目标函数中。孪生网络用于衡量真实数据与生成数据的相似度。该方法对加快生成器训练有效。
+ MatAN(matching adversarial network)使用孪生网络替换判别器,以将正确标签考虑在生成器目标函数中。孪生网络用于衡量真实数据与生成数据的相似度。该方法对加快生成器训练有效。
- SAGAN(self-attention GAN)模型的提出者认为 GAN 在合成结构约束少的种类上表现较好,但难以捕捉复杂的模式,通过在网络中引入自注意力机制以解决该问题。
+ SAGAN(self-attention GAN)模型的提出者认为 GAN 在合成结构约束少的种类上表现较好,但难以捕捉复杂的模式,通过在网络中引入自注意力机制以解决该问题。
- KDGAN 运用 KD(knowledge distillation)的思想,模型包含作为学生网络的轻量分类器、大型复杂教师网络及判别器,其中,分类器和教师网络都生成标签,二者通过互相蒸馏输出学习彼此的知识,最终可训练得到表现较好的轻量级分类器。
+ KDGAN 运用 KD(knowledge distillation)的思想,模型包含作为学生网络的轻量分类器、大型复杂教师网络及判别器,其中,分类器和教师网络都生成标签,二者通过互相蒸馏输出学习彼此的知识,最终可训练得到表现较好的轻量级分类器。
- IRGAN 利用 GAN 将 IR(information re-trieval)领域中的生成式检索模型与判别式检索模型相结合,对于生成器采用基于策略梯度的强化学习来训练,从而在典型的信息检索任务中取得较好的表现。IRGAN 模型如**图33**所示。
+ IRGAN 利用 GAN 将 IR(information retrieval)领域中的生成式检索模型与判别式检索模型相结合,对于生成器采用基于策略梯度的强化学习来训练,从而在典型的信息检索任务中取得较好的表现。IRGAN 模型如**图33**所示。

@@ -447,25 +447,25 @@ CycleGAN的网络架构如图所示:
- LapGAN使用了图像处理领域的思想,同时使用三组 cGAN ,按照高斯金字塔的模式对图像逐级下采样训练网络,按照拉普拉斯金字塔的模式对图像逐级上采样,从而达到从模糊图像中重构高像素图像的目的。
+ LapGAN使用了图像处理领域的思想,同时使用三组 cGAN ,按照高斯金字塔的模式对图像逐级下采样训练网络,按照拉普拉斯金字塔的模式对图像逐级上采样,从而达到从模糊图像中重构高分辨率图像的目的。
- QuGAN 将 GAN的思想与量子计算的思想相结合,将生成器类比生成线路,判别器类比判别线路,生成线路尽可能模仿真实线路的波函数,判别线路尽可能仅通过对辅助比特的测量来确定输入的波函数来自生成线路还是真实线路。
+ QuGAN 将 GAN的思想与量子计算的思想相结合,将生成器类比生成线路,判别器类比判别线路,生成线路尽可能模仿真实线路的波函数,判别线路尽可能仅通过对辅助比特的测量来确定输入的波函数来自生成线路还是真实线路。
- BayesianGAN 模型的提出者认为 GAN 隐式学习分布的方法难以显式建模,因此提出使用随机梯度哈密顿蒙特卡洛方法来边际化两个神经网络的权值,从而使数据表示具有可解释性。
+ BayesianGAN 模型的提出者认为 GAN 隐式学习分布的方法难以显式建模,因此提出使用随机梯度哈密顿蒙特卡洛方法来边际化两个神经网络的权值,从而使数据表示具有可解释性。
## 5、GAN的应用

- GANs是一个强大的生成模型,它可以使用随机向量生成逼真的样本。我们既不需要知道明确的真实数据分布,也不需要任何数学假设。这些优点使得GANs被广泛应用于图像处理、计算机视觉、序列数据等领域。上图是基于GANs的实际应用场景对不同GAN进行了分类,包括图像超分辨率、图像合成与处理、纹理合成、目标检测、视频合成、音频合成、多模态转变等。
+ GANs是一个强大的生成模型,它可以使用随机向量生成逼真的样本。我们既不需要知道明确的真实数据分布,也不需要任何数学假设。这些优点使得GANs被广泛应用于图像处理、计算机视觉、序列数据等领域。上图是基于GANs的实际应用场景对不同GAN进行了分类,包括图像超分辨率、图像合成与处理、纹理合成、目标检测、视频合成、音频合成、多模态转变等。
### 5.1 计算机视觉与图像处理
- GANs最成功的应用是图像处理和计算机视觉,如图像超分辨率、图像合成和处理以及视频处理。
+ GANs最成功的应用是图像处理和计算机视觉,如图像超分辨率、图像合成和处理以及视频处理。
#### 5.1.1 超分辨率(SR)
- 图像超分辨率技术主要解决将低分辨率的图像在不失真的前提下转变为高分辨率的问题,且需要在准确性和速度斱面保持优越性能,此外超分辨率技术可解决例如医学诊断、视频监控、卫星遥感等场景的部分行业痛点问题,应用此技术产生的社会实际价值不可估量。基于深度学习的图像超分辨技术可分为:有监督、无监督、特定应用领域三种类型。SR-GAN 模型将参数化的残差网络代替生成器,而判别器则选用了 VGG 网络,其损失函数通过内容损失和对抗损失的加权组合,相比其他深度卷积网络等模型在超分辨精度和速度上得到了改进,将图像纹理细节的学习表征较好,故而在超分辨领域取得了不俗的效果。
+ 图像超分辨率技术主要解决将低分辨率的图像在不失真的前提下转变为高分辨率的问题,且需要在准确性和速度方面保持优越性能,此外超分辨率技术可解决例如医学诊断、视频监控、卫星遥感等场景的部分行业痛点问题,应用此技术产生的社会实际价值不可估量。基于深度学习的图像超分辨技术可分为:有监督、无监督、特定应用领域三种类型。SR-GAN 模型用参数化的残差网络作为生成器,而判别器则选用了 VGG 网络,其损失函数通过内容损失和对抗损失的加权组合,相比其他深度卷积网络等模型在超分辨精度和速度上得到了改进,对图像纹理细节的学习表征较好,故而在超分辨领域取得了不俗的效果。
#### 5.1.2 图像合成与处理
@@ -477,69 +477,69 @@ CycleGAN的网络架构如图所示:
##### 一般目标
- 让GAN处理像ImageNet这样的分类数据集有点困难,因为ImageNet有一千个不同的对象类,并且这些图像的质量逐年提高。
+ 让GAN处理像ImageNet这样的分类数据集有点困难,因为ImageNet有一千个不同的对象类,并且这些图像的质量逐年提高。
- 虽然大多数论文使用GANs合成二维图像,但Wu等人使用GANs和体积卷积合成了三维(3-D)样本。Wu等人合成了汽车、椅子、沙发和桌子等新奇物品。Im等人利用反复出现的敌对网络生成图像。Yang等人提出了用于图像生成的分层递归GAN(LR-GAN)。
+ 虽然大多数论文使用GANs合成二维图像,但Wu等人使用GANs和体积卷积合成了三维(3-D)样本。Wu等人合成了汽车、椅子、沙发和桌子等新奇物品。Im等人利用循环对抗网络(recurrent adversarial networks)生成图像。Yang等人提出了用于图像生成的分层递归GAN(LR-GAN)。
##### 图像修复
- 图像补全是一种传统的图像修复处理仸务,其目的是填补图像中内容缺失或被遮盖的部分,在目前的生产生活环境中此类仸务得到广泛的现实应用。大多数补全方法都是基于低级线索,仍图像的邻近区域中寻找小块,幵创建与小块相似的合成内容。王海涌等人借助此原理,实现了局部遮挡情况下的人脸表情识别,识别效率较高。与现有的寻找补全块迚行合成的模型不同,相关研究文献提出的模型基于 CNN 生成缺失区域的内容。该算法采用重构损失函数、两个对抗性损失函数和一个语义解析损失函数迚行训练,以保证像素质量和局部-全局内容的稳定性。
+ 图像补全是一种传统的图像修复处理任务,其目的是填补图像中内容缺失或被遮盖的部分,在目前的生产生活环境中此类任务得到广泛的现实应用。大多数补全方法都是基于低级线索,从图像的邻近区域中寻找小块,并创建与小块相似的合成内容。王海涌等人借助此原理,实现了局部遮挡情况下的人脸表情识别,识别效率较高。与现有的寻找补全块进行合成的模型不同,相关研究文献提出的模型基于 CNN 生成缺失区域的内容。该算法采用重构损失函数、两个对抗性损失函数和一个语义解析损失函数进行训练,以保证像素质量和局部-全局内容的稳定性。
##### 人与图像生成过程之间的交互
- 有许多应用程序涉及人与图像生成过程之间的交互。真实图像操作很困难,因为它需要以用户控制的方式修改图像,同时使其看起来真实。如果用户没有有效的艺术技巧,编辑时很容易偏离自然图像的多样性。交互式GAN(IGAN)定义了一类图像编辑操作,并将其输出约束为始终位于学习的流形上。
+ 有许多应用程序涉及人与图像生成过程之间的交互。真实图像操作很困难,因为它需要以用户控制的方式修改图像,同时使其看起来真实。如果用户没有有效的艺术技巧,编辑时很容易偏离自然图像的多样性。交互式GAN(IGAN)定义了一类图像编辑操作,并将其输出约束为始终位于学习的流形上。
#### 5.1.3 纹理合成
- 纹理合成是图像领域的经典问题。Markovian GANs(MGAN)是一种基于GANs的纹理合成方法。通过捕获马尔可夫面片的纹理数据,MGAN可以快速生成风格化的视频和图像,从而实现实时纹理合成。空间GAN(SGAN)是第一个将GAN与完全无监督学习应用于纹理合成的人。周期性空间GAN(PSGAN)是SGAN的一个变体,它可以从单个图像或复杂的大数据集中学习周期性纹理。
+ 纹理合成是图像领域的经典问题。Markovian GANs(MGAN)是一种基于GANs的纹理合成方法。通过捕获马尔可夫面片的纹理数据,MGAN可以快速生成风格化的视频和图像,从而实现实时纹理合成。空间GAN(SGAN)是第一个将GAN与完全无监督学习应用于纹理合成的方法。周期性空间GAN(PSGAN)是SGAN的一个变体,它可以从单个图像或复杂的大数据集中学习周期性纹理。
#### 5.1.4 目标检测
- 我们如何学习对变形和遮挡保持不变的对象检测器?一种方法是使用数据驱动策略——收集在不同条件下具有对象示例的大规模数据集。我们希望最终的分类器能够使用这些实例来学习不变性。是否可以查看数据集中的所有变形和遮挡?一些变形和遮挡非常罕见,在实际应用中几乎不会发生;然而,我们想学习一种对这种情况不变的方法。Wang等人使用GANs生成具有变形和遮挡的实例。对手的目标是生成对象检测器难以分类的实例。通过使用切割器和GANs,Segan检测到图像中被其他对象遮挡的对象。为了解决小目标检测问题,Li等人提出了感知GAN,Bai等人提出了端到端多任务GAN(MTGAN)。
+ 我们如何学习对变形和遮挡保持不变的对象检测器?一种方法是使用数据驱动策略——收集在不同条件下具有对象示例的大规模数据集。我们希望最终的分类器能够使用这些实例来学习不变性。是否可以查看数据集中的所有变形和遮挡?一些变形和遮挡非常罕见,在实际应用中几乎不会发生;然而,我们想学习一种对这种情况不变的方法。Wang等人使用GANs生成具有变形和遮挡的实例。对手的目标是生成对象检测器难以分类的实例。通过结合分割器和GANs,SeGAN可以检测图像中被其他对象遮挡的对象。为了解决小目标检测问题,Li等人提出了感知GAN,Bai等人提出了端到端多任务GAN(MTGAN)。
#### 5.1.5 视频
- Villegas等人提出了一种深度神经网络,用于使用GANs预测自然视频序列中的未来帧。Denton和Birodkar提出了一个新模型,名为解纠缠表示网(DRNET),该模型基于GANs从视频中学习解纠缠图像表示。相关研究文献提出了一种新的生成性对抗学习框架下的视频到视频合成方法(video2video)。MoCoGan建议分解运动和内容以生成视频。GAN还被用于其他视频应用,如视频预测和视频重定目标。
+ Villegas等人提出了一种深度神经网络,用于使用GANs预测自然视频序列中的未来帧。Denton和Birodkar提出了一个新模型,名为解纠缠表示网(DRNET),该模型基于GANs从视频中学习解纠缠图像表示。相关研究文献提出了一种新的生成性对抗学习框架下的视频到视频合成方法(video2video)。MoCoGan建议分解运动和内容以生成视频。GAN还被用于其他视频应用,如视频预测和视频重定向(retargeting)。
- 视频可通过逐帧分解理解为多张图片的组合,故而在 GAN 生成图像的基础上,实现视频的生成和预测 。视频一般而言是由相对静止的背景色和动态的物体运动组成的,VGAN考虑了这一点,使用双流生成器以 3D CNN 的移动前景生成器预测下一帧,而使用 2D CNN 的静态背景生成器使背景保持静止。Pose-GAN采用混合VAE和GAN斱法,它使用 VAE 斱法在当前的物体姿态和过去姿态隐藏的表示来估计未来的物体运动。
+ 视频可通过逐帧分解理解为多张图片的组合,故而在 GAN 生成图像的基础上,实现视频的生成和预测 。视频一般而言是由相对静止的背景色和动态的物体运动组成的,VGAN考虑了这一点,使用双流生成器以 3D CNN 的移动前景生成器预测下一帧,而使用 2D CNN 的静态背景生成器使背景保持静止。Pose-GAN采用混合VAE和GAN方法,它使用 VAE 方法在当前的物体姿态和过去姿态隐藏的表示来估计未来的物体运动。
- 基于视频的 GAN 不仅需要考虑空间建模,还需要考虑时间建模,即视频序列中每个相邻帧之间的运动。MoCoGAN被提出以无监督的斱式学习运动和内容,它将图像的潜在空间划分为内容空间和运动空间。DVD-GAN能够基于 BigGAN 架构生成更长、更高分辨率的视频,同时引入可扩展的、视频专用的生成器和鉴别器架构。
+ 基于视频的 GAN 不仅需要考虑空间建模,还需要考虑时间建模,即视频序列中每个相邻帧之间的运动。MoCoGAN被提出以无监督的方式学习运动和内容,它将图像的潜在空间划分为内容空间和运动空间。DVD-GAN能够基于 BigGAN 架构生成更长、更高分辨率的视频,同时引入可扩展的、视频专用的生成器和鉴别器架构。
#### 5.1.6 其他图像和视觉应用
- GANs已被用于其他图像处理和计算机视觉任务,如对象变形、语义分割、视觉显著性预测、对象跟踪、图像去杂、自然图像抠图、图像修复、图像融合,图像完成,图像分类。
+ GANs已被用于其他图像处理和计算机视觉任务,如对象变形、语义分割、视觉显著性预测、对象跟踪、图像去杂、自然图像抠图、图像修复、图像融合,图像完成,图像分类。
### 5.2 时序数据
- GANs在自然语言、音乐、语音、语音和时间序列等顺序数据方面也取得了成就。
+ GANs在自然语言、音乐、语音和时间序列等顺序数据方面也取得了成就。
#### 5.2.1 自然语言处理
- GAN 在图像上的性能表现,让众多研究者在文本生成领域也提出了基于 GAN 的一些模型。SeqGAN 与强化学习结合,避免了一般 GAN 模型不能生成离散序列,且可在生成离散数据时能够返回模型的梯度值,此类斱法可用于生成语音数据、机器翻译等场景。MaskGAN模型,引入了 Actor-Critic 架构可根据上下文内容填补缺失的文本信息。
+ GAN 在图像上的性能表现,让众多研究者在文本生成领域也提出了基于 GAN 的一些模型。SeqGAN 与强化学习结合,解决了一般 GAN 模型不能生成离散序列的问题,且在生成离散数据时能够返回模型的梯度值,此类方法可用于生成语音数据、机器翻译等场景。MaskGAN模型引入了 Actor-Critic 架构,可根据上下文内容填补缺失的文本信息。
- 除了图像生成文本的应用,文献 StackGAN 可实现通过输入文本信息来产生相应的文本所描述的图像且图像具有高分辨率,此模型实现了文本与图像的交互生成。此外 CookGAN 从图像因果链的角度实现了基于文本生成图像菜单的方法。而TiVGAN 则实现了通过文本来产生连续性视频序列的构想。
+ 除了图像生成文本的应用,文献 StackGAN 可实现通过输入文本信息来产生相应的文本所描述的图像且图像具有高分辨率,此模型实现了文本与图像的交互生成。此外 CookGAN 从图像因果链的角度实现了基于文本生成图像菜单的方法。而TiVGAN 则实现了通过文本来产生连续性视频序列的构想。
#### 5.2.2 音乐
- GANs被用于生成音乐,如连续RNN-GAN(C-RNN-GAN)、连续RNN-GAN(风琴)和SeqGAN。
+ GANs被用于生成音乐,如连续RNN-GAN(C-RNN-GAN)、ORGAN 和 SeqGAN。
#### 5.2.3 语音和音频
- GANs已用于语音和音频分析,如合成、增强和识别。
+ GANs已用于语音和音频分析,如合成、增强和识别。
### 5.3 其他应用
#### 5.3.1 医学领域
- 一般来说,在医学成像中使用 GANs 有两种方法:第一种集中在生成阶段,这有助于实现训练数据的基本结构,以创建真实的图像,使得 GANs 能够 更 好 地 处 理 数 据 稀 缺 性 和 患 者 隐 私 问 题。第二种集中在判别阶段,其中判别器可以被认为是未处理图像的先验学习,因此可以作为伪生成图像的检测器。
+ 一般来说,在医学成像中使用 GANs 有两种方法:第一种集中在生成阶段,这有助于实现训练数据的基本结构,以创建真实的图像,使得 GANs 能够更好地处理数据稀缺性和患者隐私问题。第二种集中在判别阶段,其中判别器可以被认为是未处理图像的先验学习,因此可以作为伪生成图像的检测器。
- 生成阶段:Sandfort 等人提出了一种基于CycleGAN 的数据增强模型,以提高 CT 分割中的泛化性。Han 等人提出了一种基于 GAN 的两阶段无监督异常检测 MRI 扫描斱法。
+ 生成阶段:Sandfort 等人提出了一种基于CycleGAN 的数据增强模型,以提高 CT 分割中的泛化性。Han 等人提出了一种基于 GAN 的两阶段无监督异常检测 MRI 扫描方法。
- 判别阶段:Tang 等人提出了一种基于叠加生成对抗网络的 CT 图像分割斱法,网络第一层减少CT 图像中的噪声,第二层创建具有增强边界的更高分辨率图像。Dou 等人提出了用于 MRI 和 CT 的 GANs,通过以无监督斱式支持源域和目标域的特征空间来处理高效的域转移。
+ 判别阶段:Tang 等人提出了一种基于叠加生成对抗网络的 CT 图像分割方法,网络第一层减少CT 图像中的噪声,第二层创建具有增强边界的更高分辨率图像。Dou 等人提出了用于 MRI 和 CT 的 GANs,通过以无监督方式支持源域和目标域的特征空间来处理高效的域转移。
#### 5.3.2 三维重构
- GAN 在三维空间上对物体的立体形状补全或重构,是对三维重构技术的完善和扩展。Wang 等人提出了一种混合结构,使用递归卷积网络(LRCN)的3D-ED-GAN 模型。Wu 等人提出了 3D-VAE-GAN 模型,该模型利用体积卷积网络和生成对抗网络最新的研究理论仍概率空间生成 3D 对象。相关研究文献介绍了一种新的GAN 训练模型来实现物体详细的三维形状。该模型采用带梯度惩罚的 Wasserstein 归一化训练,提高了图像的真实感,这种架构甚至可以仍 2D 图像中重建3D 形状并完成形状补全。
+ GAN 在三维空间上对物体的立体形状补全或重构,是对三维重构技术的完善和扩展。Wang 等人提出了一种混合结构,使用递归卷积网络(LRCN)的3D-ED-GAN 模型。Wu 等人提出了 3D-VAE-GAN 模型,该模型利用体积卷积网络和生成对抗网络最新的研究理论从概率空间生成 3D 对象。相关研究文献介绍了一种新的GAN 训练模型来实现物体详细的三维形状。该模型采用带梯度惩罚的 Wasserstein 归一化训练,提高了图像的真实感,这种架构甚至可以从 2D 图像中重建3D 形状并完成形状补全。

@@ -547,19 +547,19 @@ CycleGAN的网络架构如图所示:
- 3D-RecGAN 是一个随机深度视图重建指定对象的完整三维结构。该模型在GAN 结构上是一种编码器-解码器 3D 深度神经网络,结合了两个目标损失:用于 3D 物体重建的损失和改迚的 Wasserstein GAN 损失。也有人做出了用于语义部件编辑、形状类比和形状揑值以及三维物体形状补全的代数操作和深度自动编码器 GAN (AE-EMD)。
+ 3D-RecGAN 是一个随机深度视图重建指定对象的完整三维结构。该模型在GAN 结构上是一种编码器-解码器 3D 深度神经网络,结合了两个目标损失:用于 3D 物体重建的损失和改进的 Wasserstein GAN 损失。也有人做出了用于语义部件编辑、形状类比和形状插值以及三维物体形状补全的代数操作和深度自动编码器 GAN (AE-EMD)。
#### 5.3.3 数据科学
- GANs已用于数据生成、神经网络生成、数据增强、空间表示学习、网络嵌入、异构信息网络和移动用户评测。
+ GANs已用于数据生成、神经网络生成、数据增强、空间表示学习、网络嵌入、异构信息网络和移动用户评测。
- GANs已经广泛应用于许多其他领域,如恶意软件检测、国际象棋游戏、隐写术、隐私保护、社交机器人和网络修剪。
+ GANs已经广泛应用于许多其他领域,如恶意软件检测、国际象棋游戏、隐写术、隐私保护、社交机器人和网络修剪。
## 6、常用数据集
- 一般来说,基于图像的GANs方法使用的数据集,是基于现有数据图像基础上进行上(下)采样,增加干扰处理。处理后的图像与原图像作为一对图像用于GANs网络的训练。其他方面如视频、文字等,也是在已有的开源(或闭源)数据集上经过预处理后,讲原始数据作为标签进行网络的训练。不过,这样制作的数据集始终不能完全代表实际情况。下面讲介绍五个用于训练GANs的数据集。
+ 一般来说,基于图像的GANs方法使用的数据集,是基于现有数据图像基础上进行上(下)采样,增加干扰处理。处理后的图像与原图像作为一对图像用于GANs网络的训练。其他方面如视频、文字等,也是在已有的开源(或闭源)数据集上经过预处理后,将原始数据作为标签进行网络的训练。不过,这样制作的数据集始终不能完全代表实际情况。下面将介绍五个用于训练GANs的数据集。
### 6.1 [抽象艺术数据集](https://www.kaggle.com/bryanb/abstract-art-gallery)
@@ -567,13 +567,13 @@ CycleGAN的网络架构如图所示:

- 此数据集包含从 wikiart.org 刮出的 2782 张抽象艺术图像。这些数据可用于构建 GAN,以生成抽象艺术的合成图像。数据集包含梵高、大理、毕加索等真实抽象艺术的图像。
+ 此数据集包含从 wikiart.org 爬取的 2782 张抽象艺术图像。这些数据可用于构建 GAN,以生成抽象艺术的合成图像。数据集包含梵高、达利、毕加索等真实抽象艺术的图像。
### 6.2 [基于秀丽隐杆线虫(C. elegans)的高内容筛选](https://www.kaggle.com/kmader/high-content-screening-celegans)

- 这些数据包含与屏幕对应的图像,以找到使用圆虫C.埃莱甘斯的新抗生素。数据有圆虫感染一种叫做*肠球菌的*病原体的图像。有些图像是未用抗生素治疗的圆虫,安皮林,而另一些图像是受感染的圆虫,已经用安培素治疗。对于那些有兴趣将GAN应用于一个有趣的药物发现问题的人来说,这是一个很好的开始!
+ 此数据集来自一项利用秀丽隐杆线虫(C. elegans)筛选新抗生素的实验,包含感染了肠球菌属病原体的线虫图像:其中一些线虫感染后未用抗生素处理,另一些则用氨苄西林(ampicillin)进行了治疗。对于那些有兴趣将GAN应用于一个有趣的药物发现问题的人来说,这是一个很好的开始!
### 6.3 [肺胸X光异常](https://www.kaggle.com/kmader/pulmonary-chest-xray-abnormalities/home)
@@ -581,7 +581,7 @@ CycleGAN的网络架构如图所示:

- 此数据集包含由放射科医生临床标记的胸部 X 射线图像。有336张胸部X光图像与结核病和326张图像对应健康人。对于那些有兴趣使用 GAN 进行医疗图像数据合成的人来说,这是一个很好的数据源。
+ 此数据集包含由放射科医生临床标记的胸部 X 射线图像,其中336张图像对应结核病患者,326张图像对应健康人。对于那些有兴趣使用 GAN 进行医疗图像数据合成的人来说,这是一个很好的数据源。
### 6.4 [假脸](https://www.kaggle.com/hyperclaw79/fakefaces)
@@ -589,7 +589,7 @@ CycleGAN的网络架构如图所示:

- 这些数据实际上包含由 GAN 生成的人类面孔的合成图像。这些图像是从网站这个人不存在获得的。该网站生成一个新的假脸图像,由GAN制作,每次你刷新页面。这是一组伟大的数据,从生成合成图像与 GAN 开始。
+ 这些数据包含由 GAN 生成的人脸合成图像,图像来自网站 This Person Does Not Exist:每次刷新页面,该网站都会用GAN生成一张新的假脸图像。对于想从零开始用 GAN 生成合成图像的人来说,这是一组很好的数据。
### 6.5 [眼镜或没有眼镜](https://www.kaggle.com/jeffheaton/glasses-or-no-glasses)
@@ -597,39 +597,39 @@ CycleGAN的网络架构如图所示:

- 此数据集包含带眼镜的面部图像和无眼镜的面部图像。虽然这些图像是使用 GAN 生成的,但它们也可以作为生成其他合成图像的训练数据。
+ 此数据集包含带眼镜的面部图像和无眼镜的面部图像。虽然这些图像是使用 GAN 生成的,但它们也可以作为生成其他合成图像的训练数据。
## 7、前沿问题
- 由于GAN在整个深度学习领域都很流行,其局限性最近得到了改进。对于GANs来说,仍然存在一些有待解决的研究问题。
+ 由于GAN在整个深度学习领域都很流行,其局限性最近得到了改进。对于GANs来说,仍然存在一些有待解决的研究问题。
### 7.1 模式崩溃问题
- 尽管现有研究在解决模式崩溃问题上进行了很多尝试,也取得了一些进展,但如何解决模式崩溃问题依然是 GAN 面临的主要挑战。可以尝试的[解决办法](https://paddlepedia.readthedocs.io/en/latest/tutorials/generative_adversarial_network/basic_concept/Collapse.html)。
+ 尽管现有研究在解决模式崩溃问题上进行了很多尝试,也取得了一些进展,但如何解决模式崩溃问题依然是 GAN 面临的主要挑战。可以尝试的[解决办法](https://paddlepedia.readthedocs.io/en/latest/tutorials/generative_adversarial_network/basic_concept/Collapse.html)。
- 针对 GAN 发生模式崩溃的原因,已有一些研究工作尝试给予解释:将生成器视为一个 N 维流形的参数化描述,当流形上某点的切线空间维数小于 N ,导致在该点沿一些方向进行变化时,数据的变化无效,因此生成器会产生单一的数据; 基于最优传输理论,认为生成器将隐空间的分布映射为流形上的分布是一个传输映射,它具有间断点,是非连续映射,但神经网络目前仅能近似连续映射,从而导致生成无意义结果并引发模式崩溃;当模式崩溃发生时,判别器网络权值矩阵的奇异值急剧减小,可从该问题入手解决模式崩溃问题。
+ 针对 GAN 发生模式崩溃的原因,已有一些研究工作尝试给予解释:将生成器视为一个 N 维流形的参数化描述,当流形上某点的切线空间维数小于 N 时,在该点沿一些方向进行变化时数据的变化无效,因此生成器会产生单一的数据; 基于最优传输理论,认为生成器将隐空间的分布映射为流形上的分布是一个传输映射,它具有间断点,是非连续映射,但神经网络目前仅能近似连续映射,从而导致生成无意义结果并引发模式崩溃;当模式崩溃发生时,判别器网络权值矩阵的奇异值急剧减小,可从该问题入手解决模式崩溃问题。
- 与普通神经网络训练过程相比, GAN 模型中存在生成器 G 与判别器 D 之间的博弈机制,这使得 GAN 模式崩溃问题变得复杂。总而言之, GAN模式崩溃问题研究工作尚处于起步阶段,研究出发的角度多样,未形成一个统一的框架来解释该问题。今后的工作如果能从 GAN 的博弈机制出发,将生成器和判别器两方面的相关因素综合起来,会有助于该问题的解决。
+ 与普通神经网络训练过程相比, GAN 模型中存在生成器 G 与判别器 D 之间的博弈机制,这使得 GAN 模式崩溃问题变得复杂。总而言之, GAN模式崩溃问题研究工作尚处于起步阶段,研究出发的角度多样,未形成一个统一的框架来解释该问题。今后的工作如果能从 GAN 的博弈机制出发,将生成器和判别器两方面的相关因素综合起来,会有助于该问题的解决。
### 7.2 训练集样本的影响
- 神经网络的表现主要取决于模型自身的特点,以及训练使用的真实样本集。同样, GAN模型的训练学习的质量也受制于训练样本集的影响。一方面,样本集的自身内在数据分布情况可能会影响 GAN 的训练效率和生成质量。例如,在样本集上定义类内距离集与类间距离集,并依此提出基于距离的可分性指数,用于量化样本可分性,并指出当不同种类样本按相同分布混合时最难以区分,使用这种样本集进行有监督学习时很难使模型有较好表现。这对于 GAN的样本生成质量评价指标设计具有借鉴意义。另一方面, GAN 模型的一大特点是学习真实样本分布,因此需要足够多真实样本进行训练才能有较好表现,研究如何使用小规模训练集得到较好的 GAN 模型是具有挑战和意义的。 GAN 模型对训练集质量也有较高要求,而高质量的数据集往往难以获得,因此研究哪些数据会影响模型表现,如何规避低质量样本带来的负面影响,以降低对训练集质量的高要求,成为今后的研究方向。
+ 神经网络的表现主要取决于模型自身的特点,以及训练使用的真实样本集。同样, GAN模型的训练学习的质量也受制于训练样本集的影响。一方面,样本集的自身内在数据分布情况可能会影响 GAN 的训练效率和生成质量。例如,在样本集上定义类内距离集与类间距离集,并依此提出基于距离的可分性指数,用于量化样本可分性,并指出当不同种类样本按相同分布混合时最难以区分,使用这种样本集进行有监督学习时很难使模型有较好表现。这对于 GAN的样本生成质量评价指标设计具有借鉴意义。另一方面, GAN 模型的一大特点是学习真实样本分布,因此需要足够多真实样本进行训练才能有较好表现,研究如何使用小规模训练集得到较好的 GAN 模型是具有挑战和意义的。 GAN 模型对训练集质量也有较高要求,而高质量的数据集往往难以获得,因此研究哪些数据会影响模型表现,如何规避低质量样本带来的负面影响,以降低对训练集质量的高要求,成为今后的研究方向。
- 此外,在降低训练集样本数量需求方面已有一些研究。通过迁移学习,在预训练的生成器网络和判别器网络上使用适当的样本进行微调,但样本严重不足或样本与预训练数据区别较大时效果不佳。有研究者认为网络权值的奇异值与生成样本的语义有关,因此通过对网络权值进行奇异值分解,微调预训练模型的奇异值来达到使用较少样本训练的目的。在 GAN 上使用元学习,在小样本训练问题上取得了一定的效果。使用重建损失和三元组损失改造GAN 的损失函数,从而将自监督学习的思想引入GAN 中,在小样本训练问题上取得了一些效果。
+ 此外,在降低训练集样本数量需求方面已有一些研究。通过迁移学习,在预训练的生成器网络和判别器网络上使用适当的样本进行微调,但样本严重不足或样本与预训练数据区别较大时效果不佳。有研究者认为网络权值的奇异值与生成样本的语义有关,因此通过对网络权值进行奇异值分解,微调预训练模型的奇异值来达到使用较少样本训练的目的。在 GAN 上使用元学习,在小样本训练问题上取得了一定的效果。使用重建损失和三元组损失改造GAN 的损失函数,从而将自监督学习的思想引入GAN 中,在小样本训练问题上取得了一些效果。
- 对于降低训练集样本质量需求的研究已有一些研究。 NRGAN 在模型中设置了图像生成器和噪声生成器,分别用以学习真实样本中的数据分布和噪声分布,从而在无须预知噪声分布的情况下从有噪训练集中生成无噪样本。
+ 对于降低训练集样本质量需求的研究已有一些研究。 NRGAN 在模型中设置了图像生成器和噪声生成器,分别用以学习真实样本中的数据分布和噪声分布,从而在无须预知噪声分布的情况下从有噪训练集中生成无噪样本。
- 目前,有关训练集样本对 GAN 的影响的研究仍处于初期,缩小训练集规模往往导致对复杂模式支持较差,而降低训练集样本质量需求则伴随着过多假设。后续工作应进一步研究产生这些限制的原因,并以此为指导使其应用场景更符合真实情况。
+ 目前,有关训练集样本对 GAN 的影响的研究仍处于初期,缩小训练集规模往往导致对复杂模式支持较差,而降低训练集样本质量需求则伴随着过多假设。后续工作应进一步研究产生这些限制的原因,并以此为指导使其应用场景更符合真实情况。
### 7.3 与模型鲁棒性问题研究的交叉
- 神经网络鲁棒性反映当输入数据集上出现微小扰动后,模型依然能在输出端表现出抗干扰的能力。 GAN 的研究与人工神经网络鲁棒性的研究相辅相成,密切相关。一方面, GAN 使用对抗样本对网络模型进行训练,有助于提升模型的鲁棒性 。另一方面,神经网络鲁棒性的相关研究与 GAN 的改进存在内在联系,如深度神经网络经过对抗训练后损失在峰值附近更加平滑,以及在 CNN 中使用 Lipschitz 条件可以使模型同时具有较好的鲁棒性与准确性,该领域的相关研究对于 GAN 的改进有一定的参考借鉴价值,特别是在生成对抗样本质量的评价和生成器的目标研究方面。
+ 神经网络鲁棒性反映当输入数据集上出现微小扰动后,模型依然能在输出端表现出抗干扰的能力。 GAN 的研究与人工神经网络鲁棒性的研究相辅相成,密切相关。一方面, GAN 使用对抗样本对网络模型进行训练,有助于提升模型的鲁棒性 。另一方面,神经网络鲁棒性的相关研究与 GAN 的改进存在内在联系,如深度神经网络经过对抗训练后损失在峰值附近更加平滑,以及在 CNN 中使用 Lipschitz 条件可以使模型同时具有较好的鲁棒性与准确性,该领域的相关研究对于 GAN 的改进有一定的参考借鉴价值,特别是在生成对抗样本质量的评价和生成器的目标研究方面。
- 有研究者从对抗频度和对抗严重程度两方面描述神经网络在数据集上的鲁棒性。其中对抗频度反映数据集上对抗性扰动发生的可能性,对抗严重程度反映扰动发生时导致输出偏离的程度。该方法在 GAN 生成对抗样本数据集质量的评价层面具有借鉴价值,并对生成器的训练具有指导意义。另有研究者提出一种基于符号线性松弛的神经网络安全性分析方法,把对抗性扰动当作安全属性违反的一种约束特例来处理,其框架可以定义 5 种不同的安全属性约束,针对对抗性扰动的结果进行细化。这些工作有助于 GAN 生成器设计目标的分类研究。
+ 有研究者从对抗频度和对抗严重程度两方面描述神经网络在数据集上的鲁棒性。其中对抗频度反映数据集上对抗性扰动发生的可能性,对抗严重程度反映扰动发生时导致输出偏离的程度。该方法在 GAN 生成对抗样本数据集质量的评价层面具有借鉴价值,并对生成器的训练具有指导意义。另有研究者提出一种基于符号线性松弛的神经网络安全性分析方法,把对抗性扰动当作安全属性违反的一种约束特例来处理,其框架可以定义 5 种不同的安全属性约束,针对对抗性扰动的结果进行细化。这些工作有助于 GAN 生成器设计目标的分类研究。
## 8、总结
- 本文从不同方面综述了对抗生成网络(GANs)的现有模型方法。首先根据训练策略、结构变化、训练技巧、监督类型等方面对现有GAN方法进行了分类,并以经典网络为例,分别介绍了不同GAN网络的改进点。接着详细介绍了GAN网络的基本结构,并给出了较新的生成对抗网络发展脉络。最后基于实际应用场景对经典常用的GAN模型进行了介绍。我们选取了Kaggle的五个常用的GAN数据集,并分别进行了介绍。数据集链接分别放置于数据集名称处。最后,针对现阶段的生成对抗网络前沿问题进行了介绍。
+ 本文从不同方面综述了对抗生成网络(GANs)的现有模型方法。首先根据训练策略、结构变化、训练技巧、监督类型等方面对现有GAN方法进行了分类,并以经典网络为例,分别介绍了不同GAN网络的改进点。接着详细介绍了GAN网络的基本结构,并给出了较新的生成对抗网络发展脉络。最后基于实际应用场景对经典常用的GAN模型进行了介绍。我们选取了Kaggle的五个常用的GAN数据集,并分别进行了介绍。数据集链接分别放置于数据集名称处。最后,针对现阶段的生成对抗网络前沿问题进行了介绍。
## 参考文献
@@ -644,4 +644,3 @@ CycleGAN的网络架构如图所示:
[5] [生成对抗网络及其应用研究综述](https://kns.cnki.net/kcms/detail/11.2127.TP.20210720.1804.015.html)
[6] [生成对抗网络研究综述](http://www.infocomm-journal.com/cjnis/CN/10.11959/j.issn.2096-109x.2021080)
-
diff --git a/docs/tutorials/generative_adversarial_network/overview/index.rst b/docs/tutorials/generative_adversarial_network/overview/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/interview_questions/index.rst b/docs/tutorials/interview_questions/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/interview_questions/interview_questions.md b/docs/tutorials/interview_questions/interview_questions.md
old mode 100644
new mode 100755
index 50b38e150..9d96786f6
--- a/docs/tutorials/interview_questions/interview_questions.md
+++ b/docs/tutorials/interview_questions/interview_questions.md
@@ -112,7 +112,7 @@
* [TinyBERT是如何对BERT进行蒸馏的?](https://paddlepedia.readthedocs.io/en/latest/tutorials/model_compress/model_distill/TinyBERT.html)
-
+
# 强化学习
@@ -121,6 +121,3 @@
* [什么是马尔可夫决策过程?](https://paddlepedia.readthedocs.io/en/latest/tutorials/reinforcement_learning/basic_information.html)
* [什么是SARSA?](https://paddlepedia.readthedocs.io/en/latest/tutorials/reinforcement_learning/Sarsa.html#id1)
* [什么是Q-Learning?](https://paddlepedia.readthedocs.io/en/latest/tutorials/reinforcement_learning/Q-learning.html#id1)
-
-
-
diff --git a/docs/tutorials/meta_learning/index.rst b/docs/tutorials/meta_learning/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/meta_learning/metric_based_meta_learning/MN.md b/docs/tutorials/meta_learning/metric_based_meta_learning/MN.md
old mode 100644
new mode 100755
index 591c52760..61ee212d1
--- a/docs/tutorials/meta_learning/metric_based_meta_learning/MN.md
+++ b/docs/tutorials/meta_learning/metric_based_meta_learning/MN.md
@@ -1,6 +1,6 @@
# Matching Network
-Matching Network (MN)
+Matching Network (MN)
结合了度量学习 (Metric Learning) 与记忆增强神经网络 (Memory Augment Neural Networks),
并利用注意力机制与记忆机制加速学习,同时提出了 set-to-set 框架,
使得 MN 能够为新类产生合理的测试标签,且不用网络做任何改变。
@@ -8,7 +8,7 @@ Matching Network (MN)
## 1 MN
-将支持集 $S=\left\{\left(x_{i}, y_{i}\right)\right\}_{i=1}^{k}$
+将支持集 $S=\left\{\left(x_{i}, y_{i}\right)\right\}_{i=1}^{k}$
映射到一个分类器 $c_{S}(\hat{x})$ ,
给定一个测试样本 $\hat{x}$ ,$c_{S}(\hat{x})$ 定义一个关于输出 $\hat{y}$ 的概率分布,即
@@ -32,7 +32,7 @@ $$
上式本质是将一个输入的新类描述为支持集中所有类的一个线性组合,
结合了核密度估计KDE( $a$ 可以看做是一种核密度估计)和 KNN 。
-其中, $k$ 表示支持集中样本类别数,
+其中, $k$ 表示支持集中样本类别数,
$a\left(\hat{x}, x_{i}\right)$ 是注意力机制,
类似 attention 模型中的核函数,
用来度量 $\hat{x}$ 和训练样本 $x_{i}$ 的匹配度。
@@ -43,7 +43,7 @@ $$
a\left(\hat{x}, x_{i}\right)=\frac{e^{c\left(f(\hat{x}), g\left(x_{i}\right)\right)}}{\sum_{j=1}^{k} e^{c\left(f(\hat{x}), g\left(x_{j}\right)\right)}}
$$
-其中, $c(\cdot)$ 表示余弦相似度,
+其中, $c(\cdot)$ 表示余弦相似度,
$f$ 与 $g$ 表示施加在测试样本与训练样本上的嵌入函数 (Embedding Function)。
如果注意力机制是 $X \times X$ 上的核,
@@ -56,7 +56,7 @@ $f$ 与 $g$ 表示施加在测试样本与训练样本上的嵌入函数 (Embedd

-图1 MN 示意图。
+图1 MN 示意图。
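上面的注意力机制与线性组合可以用几行 numpy 写出(示意草图:嵌入 $f(\hat{x})$、$g(x_i)$ 用随机向量代替,支持集取 5 类各 1 个样本,均为假设):

```python
import numpy as np

rng = np.random.default_rng(0)

def cosine(u, V):
    """u 与矩阵 V 每一行的余弦相似度 c(f(x_hat), g(x_i))。"""
    return (V @ u) / (np.linalg.norm(V, axis=1) * np.linalg.norm(u) + 1e-8)

k, d = 5, 16
g_x = rng.normal(size=(k, d))        # 支持集样本的嵌入 g(x_i)
y   = np.eye(k)                      # one-hot 标签 y_i(假设 5 个类各 1 个样本)
f_q = rng.normal(size=d)             # 测试样本的嵌入 f(x_hat)

c = cosine(f_q, g_x)
a = np.exp(c) / np.exp(c).sum()      # softmax 注意力 a(x_hat, x_i)
y_hat = a @ y                        # 输出分布:支持集标签的线性组合
print(np.round(a, 3), "预测类别:", y_hat.argmax())
```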
@@ -123,14 +123,14 @@ $$
- 将( $K+1$ 个)浅层变量全部输入到 BiLSTM 中,获得 $K+1$ 个输出,
然后使用余弦距离判断前 $K$ 个输出中每个输出与最后一个输出之间的相似度。
-- 根据计算出来的相似度,按照任务 $S$ 中的标签信息 $y_1, y_2, \ldots, y_K$
+- 根据计算出来的相似度,按照任务 $S$ 中的标签信息 $y_1, y_2, \ldots, y_K$
求解目标图片 $\hat{x}$ 的类别标签 $\hat{y}$。
## 5 MN 分类结果
-表1 MN 在 Omniglot 上的分类结果。
+表1 MN 在 Omniglot 上的分类结果。
| Model | Matching Fn | Fine Tune | 5-way 1-shot | 5-way 5-shot | 20-way 1-shot | 20-way 5-shot |
@@ -147,10 +147,10 @@ $$
-表1 MN 在 miniImageNet 上的分类结果。
+表2 MN 在 miniImageNet 上的分类结果。
-| Model | Matching Fn | Fine Tune | 5-way 1-shot | 5-way 5-shot |
+| Model | Matching Fn | Fine Tune | 5-way 1-shot | 5-way 5-shot |
| :----: | :----: | :----: | :----: | :----: |
| PIXELS | Cosine | N | 23.0 $\%$ | 26.6 $\%$ |
| BASELINE CLASSIFIER | Cosine | N | 36.6 $\%$ | 46.0 $\%$ |
@@ -162,7 +162,7 @@ $$
| MATCHING NETS | Cosine (FCE) | Y | **46.6** $\%$ | **60.0** $\%$ |
-## 6 创新点
+## 6 创新点
- 采用匹配的形式实现小样本分类任务,
引入最近邻算法的思想解决了深度学习算法在小样本的条件下无法充分优化参数而导致的过拟合问题,
@@ -183,4 +183,4 @@ $$
## 参考文献
-[1] [Matching Networks for One Shot Learning](https://proceedings.neurips.cc/paper/2016/hash/90e1357833654983612fb05e3ec9148c-Abstract.html)
\ No newline at end of file
+[1] [Matching Networks for One Shot Learning](https://proceedings.neurips.cc/paper/2016/hash/90e1357833654983612fb05e3ec9148c-Abstract.html)
diff --git a/docs/tutorials/meta_learning/metric_based_meta_learning/PN.md b/docs/tutorials/meta_learning/metric_based_meta_learning/PN.md
old mode 100644
new mode 100755
index 53907560f..6989e7e7c
--- a/docs/tutorials/meta_learning/metric_based_meta_learning/PN.md
+++ b/docs/tutorials/meta_learning/metric_based_meta_learning/PN.md
@@ -9,8 +9,8 @@ Prototypical Network (PN) 利用支持集中每个类别提供的少量样本,
在 few-shot 分类任务中,
假设有 $N$ 个标记的样本 $S=\left(x_{1}, y_{1}\right), \ldots,\left(x_{N}, y_{N}\right)$ ,
-其中, $x_{i} \in$ $\mathbb{R}^{D}$ 是 $D$ 维的样本特征向量,
-$y \in 1, \ldots, K$ 是相应的标签。
+其中, $x_{i} \in$ $\mathbb{R}^{D}$ 是 $D$ 维的样本特征向量,
+$y \in 1, \ldots, K$ 是相应的标签。
$S_{K}$ 表示第 $k$ 类样本的集合。
PN 计算每个类的 $M$ 维原型向量 $c_{k} \in \mathbb{R}^{M}$ ,
@@ -46,7 +46,7 @@ PN 示意图如图1所示。

-图1 PN 示意图。
+图1 PN 示意图。
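原型向量与按距离分类的计算本身很短,numpy 示意如下(支持集样本的嵌入用随机特征代替,类别数、样本数与维度均为假设值):

```python
import numpy as np

rng = np.random.default_rng(0)

K, shots, M = 5, 5, 16
# 假设支持集样本已经过嵌入函数,得到 [K, shots, M] 的特征
support = rng.normal(size=(K, shots, M))
query = rng.normal(size=M)

c = support.mean(axis=1)               # 每类原型 c_k:类内嵌入的均值
d = ((c - query) ** 2).sum(axis=1)     # 查询样本到各原型的欧氏距离平方
p = np.exp(-d) / np.exp(-d).sum()      # softmax(-d) 得到类别分布
print("预测类别:", p.argmax(), "P =", np.round(p, 3))
```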
## 2 PN 算法流程
@@ -79,7 +79,7 @@ PN 示意图如图1所示。
## 3 PN 分类结果
-表1 PN 在 Omniglot 上的分类结果。
+表1 PN 在 Omniglot 上的分类结果。
| Model | Dist. | Fine Tune | 5-way 1-shot | 5-way 5-shot | 20-way 1-shot | 20-way 5-shot |
@@ -91,10 +91,10 @@ PN 示意图如图1所示。
| PROTOTYPICAL NETWORKS | Euclid. | N | **98.8** $\%$ | 99.7 $\%$ | **96.0** $\%$ | **98.9** $\%$ |
-表1 PN 在 miniImageNet 上的分类结果。
+表2 PN 在 miniImageNet 上的分类结果。
-| Model | Dist. | Fine Tune | 5-way 1-shot | 5-way 5-shot |
+| Model | Dist. | Fine Tune | 5-way 1-shot | 5-way 5-shot |
| :----: | :----: | :----: | :----: | :----: |
| BASELINE NEAREST NEIGHBORS | Cosine | N | 28.86 $\pm$ 0.54 $\%$ | 49.79 $\pm$ 0.79 $\%$ |
| MATCHING NETWORKS | Cosine | N | 43.40 $\pm$ 0.78 $\%$ | 51.09 $\pm$ 0.71 $\%$ |
@@ -106,4 +106,4 @@ PN 示意图如图1所示。
## 参考文献
-[1] [Prototypical Networks for Few-shot Learning](https://proceedings.neurips.cc/paper/2017/hash/cb8da6767461f2812ae4290eac7cbc42-Abstract.html)
\ No newline at end of file
+[1] [Prototypical Networks for Few-shot Learning](https://proceedings.neurips.cc/paper/2017/hash/cb8da6767461f2812ae4290eac7cbc42-Abstract.html)
diff --git a/docs/tutorials/meta_learning/metric_based_meta_learning/RN.md b/docs/tutorials/meta_learning/metric_based_meta_learning/RN.md
old mode 100644
new mode 100755
index cd6fcb704..f1f17d3ab
--- a/docs/tutorials/meta_learning/metric_based_meta_learning/RN.md
+++ b/docs/tutorials/meta_learning/metric_based_meta_learning/RN.md
@@ -20,20 +20,20 @@ RN 结构如图1所示。
查询集中的样本记为 $\boldsymbol{x}_{j}$。
- 将 $\boldsymbol{x}_{i}$ 和 $\boldsymbol{x}_{j}$ 输入 $f_{\varphi}$ ,
-产生特征映射 $f_{\varphi}\left(\boldsymbol{x}_{i}\right)$
+产生特征映射 $f_{\varphi}\left(\boldsymbol{x}_{i}\right)$
和 $f_{\varphi}\left(\boldsymbol{x}_{j}\right)$ 。
-- 通过运算器 $C(.,.)$ 将 $f_{\varphi}\left(\boldsymbol{x}_{i}\right)$
+- 通过运算器 $C(.,.)$ 将 $f_{\varphi}\left(\boldsymbol{x}_{i}\right)$
和 $f_{\varphi}\left(\boldsymbol{x}_{j}\right)$ 结合,
得到 $C(f_{\varphi}\left(\boldsymbol{x}_{i}\right),f_{\varphi}\left(\boldsymbol{x}_{j}\right))$ 。
- 将 $C(f_{\varphi}\left(\boldsymbol{x}_{i}\right),f_{\varphi}\left(\boldsymbol{x}_{j}\right))$ 输入 $g_{\phi}$,
得到 $[0, 1]$ 范围内的标量,
-表示 $\boldsymbol{x}_{i}$ 和 $\boldsymbol{x}_{j}$ 之间的相似性,记为关系得分 $r_{i, j}$ 。
+表示 $\boldsymbol{x}_{i}$ 和 $\boldsymbol{x}_{j}$ 之间的相似性,记为关系得分 $r_{i, j}$ 。
$\boldsymbol{x}_{i}$ 和 $\boldsymbol{x}_{j}$ 相似度越高,$r_{i, j}$ 越大。
$$
-r_{i, j}=g_{\phi}\left(C\left(f_{\varphi}\left(\boldsymbol{x}_{i}\right), f_{\varphi}\left(\boldsymbol{x}_{j}\right)\right)\right), \
+r_{i, j}=g_{\phi}\left(C\left(f_{\varphi}\left(\boldsymbol{x}_{i}\right), f_{\varphi}\left(\boldsymbol{x}_{j}\right)\right)\right), \
i = 1, 2, ..., C
$$
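上式的一个最小数值草图如下(numpy:$f_{\varphi}$、$g_{\phi}$ 用随机线性映射加非线性代替,$C(.,.)$ 取特征拼接,均为假设):

```python
import numpy as np

rng = np.random.default_rng(0)
d = 8
W_f = rng.normal(size=(d, d))        # 嵌入模块 f_phi 的参数(占位)
W_g = rng.normal(size=2 * d)         # 关系模块 g_phi 的参数(占位)

def f(x):
    return np.maximum(0.0, W_f @ x)  # f_phi:线性映射 + ReLU(示意)

def relation_score(x_i, x_j):
    """r_ij = g_phi(C(f_phi(x_i), f_phi(x_j))),C 为特征拼接。"""
    feat = np.concatenate([f(x_i), f(x_j)])       # 运算器 C(.,.)
    return 1.0 / (1.0 + np.exp(-(W_g @ feat)))    # sigmoid 压到 [0,1]

x_i, x_j = rng.normal(size=d), rng.normal(size=d)
print("关系得分 r_ij =", round(float(relation_score(x_i, x_j)), 4))
```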
@@ -65,7 +65,7 @@ $$
- 总共有四个卷积块,前两个卷积块包含 2 $\times$ 2 的最大池化层,后边两个卷积块没有池化层。
-### 3.2 关系模块结构
+### 3.2 关系模块结构
- 有两个卷积块,每个卷积模块中都包含 2 $\times$ 2 的最大池化层。
@@ -96,7 +96,7 @@ $$
表1 RN 在 miniImageNet 上的分类结果。
-| Model | FT | 5-way 1-shot | 5-way 5-shot |
+| Model | FT | 5-way 1-shot | 5-way 5-shot |
| :----: | :----: | :----: | :----: |
| MATCHING NETS | N | 43.56 $\pm$ 0.84 $\%$ | 55.31 $\pm$ 0.73 $\%$ |
| META NETS | N | 49.21 $\pm$ 0.96 $\%$ | -- |
@@ -109,4 +109,4 @@ $$
## 参考文献
-[1] [Learning to Compare: Relation Network for Few-Shot Learning](https://openaccess.thecvf.com/content_cvpr_2018/html/Sung_Learning_to_Compare_CVPR_2018_paper.html)
\ No newline at end of file
+[1] [Learning to Compare: Relation Network for Few-Shot Learning](https://openaccess.thecvf.com/content_cvpr_2018/html/Sung_Learning_to_Compare_CVPR_2018_paper.html)
diff --git a/docs/tutorials/meta_learning/metric_based_meta_learning/SNAIL.md b/docs/tutorials/meta_learning/metric_based_meta_learning/SNAIL.md
old mode 100644
new mode 100755
index fab7f2355..9f0e58e3a
--- a/docs/tutorials/meta_learning/metric_based_meta_learning/SNAIL.md
+++ b/docs/tutorials/meta_learning/metric_based_meta_learning/SNAIL.md
@@ -4,7 +4,7 @@
在现存的方法中,元学习器的瓶颈是如何去吸收同化利用过去的经验。
注意力机制可以允许在历史中精准摘取某段具体的信息。
-Simple Neural Attentive Learner (SNAIL)
+Simple Neural Attentive Learner (SNAIL)
组合时序卷积和 soft-attention,
前者从过去的经验整合信息,后者精确查找到某些特殊的信息。
@@ -67,7 +67,7 @@ SNAIL 接收标注样本 $\left(x_{1}, y_{1}\right), \ldots,\left(x_{t-1}, y_{t-

-图1 SNAIL 基础结构示意图。
+图1 SNAIL 基础结构示意图。
### 2.2 Modular Building Blocks
@@ -77,10 +77,10 @@ Dense Block 和 Attention Block。

-图1 SNAIL 中的 Dense Block 和 Attention Block。(a) Dense Block 应用因果一维卷积,然后将输出连接到输入。TC Block 应用一系列膨胀率呈指数增长的 Dense Block。(b) Attention Block 执行(因果)键值查找,并将输出连接到输入。
+图2 SNAIL 中的 Dense Block 和 Attention Block。(a) Dense Block 应用因果一维卷积,然后将输出连接到输入。TC Block 应用一系列膨胀率呈指数增长的 Dense Block。(b) Attention Block 执行(因果)键值查找,并将输出连接到输入。
-**Densen Block**
+**Dense Block**
用了一个简单的因果一维卷积(空洞卷积),
其中膨胀率 (dilation)为 $R$ 和卷积核数量 $D$ ([1] 对于所有的实验中设置卷积核的大小为2),
最后合并结果和输入。
@@ -120,7 +120,7 @@ $$
## 3 SNAIL 分类结果
-表1 SNAIL 在 Omniglot 上的分类结果。
+表1 SNAIL 在 Omniglot 上的分类结果。
| Method | 5-way 1-shot | 5-way 5-shot | 20-way 1-shot | 20-way 5-shot |
@@ -134,7 +134,7 @@ $$
| SNAIL | **99.07 $\pm$ 0.16** $\%$ | **99.78 $\pm$ 0.09** $\%$ | **97.64 $\pm$ 0.30** $\%$ | **99.36 $\pm$ 0.18** $\%$ |
-表1 SNAIL 在 miniImageNet 上的分类结果。
+表2 SNAIL 在 miniImageNet 上的分类结果。
| Method | 5-way 1-shot | 5-way 5-shot |
@@ -148,4 +148,4 @@ $$
## 参考文献
-[1] [A Simple Neural Attentive Meta-Learner](https://openreview.net/forum?id=B1DmUzWAW)
\ No newline at end of file
+[1] [A Simple Neural Attentive Meta-Learner](https://openreview.net/forum?id=B1DmUzWAW)
diff --git a/docs/tutorials/meta_learning/metric_based_meta_learning/index.rst b/docs/tutorials/meta_learning/metric_based_meta_learning/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/meta_learning/model_based_meta_learning/Learning_to_Learn.md b/docs/tutorials/meta_learning/model_based_meta_learning/Learning_to_Learn.md
old mode 100644
new mode 100755
index ffa746233..62bac1fd1
--- a/docs/tutorials/meta_learning/model_based_meta_learning/Learning_to_Learn.md
+++ b/docs/tutorials/meta_learning/model_based_meta_learning/Learning_to_Learn.md
@@ -1,6 +1,6 @@
# Learning to Learn
-Learning to Learn by Gradient Descent by Gradient Descent
+Learning to Learn by Gradient Descent by Gradient Descent
提出了一种全新的优化策略,
用 LSTM 替代传统优化方法学习一个针对特定任务的优化器。
@@ -46,7 +46,7 @@ optimizer $g$ 的更新则由 $f$, $\nabla f$ 及 $\phi$ 决定。

-图1 Learning to Learn 中 optimizer 和 optimizee 工作原理。
+图1 Learning to Learn 中 optimizer 和 optimizee 工作原理。
optimizer 为 optimizee 提供更新策略,
optimizee 将损失信息反馈给 optimizer,协助 optimizer 更新。
@@ -72,7 +72,7 @@ $\nabla_{t}=\nabla_{\theta} f\left(\theta_{t}\right)$ 。

-图1 Learning to Learn 计算图。
+图2 Learning to Learn 计算图。
梯度只沿实线传递,不沿虚线传递(因为 optimizee 的梯度不依赖于 optimizer 的参数,即
$\partial \nabla_{t} / \partial \phi = 0$ ),这样可以避免计算 $f$ 的二阶导。
@@ -101,7 +101,7 @@ optimizer 的参数 $\phi$ 共享,隐层状态 $h_{i}$ 不共享。

-图3 LSTM 优化器的一步更新过程。所有 LSTM 的 $\phi$ 共享,$h_{i}$ 不共享。
+图3 LSTM 优化器的一步更新过程。所有 LSTM 的 $\phi$ 共享,$h_{i}$ 不共享。
### 2.3 预处理和后处理
@@ -115,11 +115,11 @@ $$
其中, $p>0$ 为任意一个参数([1] 取 $p=10$),用来裁剪梯度。
如果第一个参数的取值大于 $-1$ ,
-那么它就代表梯度的 $\log$ ,第二个参数则是它的符号。
+那么它就代表梯度的 $\log$ ,第二个参数则是它的符号。
如果第一个参数的取值等于 $-1$ ,
那么它将作为一个标记指引神经网络寻找第二个参数,此时第二个参数就是对梯度的缩放。
## 参考文献
-[1] [Learning to Learn by Gradient Descent by Gradient Descent](https://proceedings.neurips.cc/paper/2016/hash/fb87582825f9d28a8d42c5e5e5e8b23d-Abstract.html)
\ No newline at end of file
+[1] [Learning to Learn by Gradient Descent by Gradient Descent](https://proceedings.neurips.cc/paper/2016/hash/fb87582825f9d28a8d42c5e5e5e8b23d-Abstract.html)
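
上面的预处理公式可以直接翻译成代码。下面是按该定义实现的 numpy 小示例($p=10$ 与 [1] 一致;屏蔽 log 告警属于实现细节上的演示假设):

```python
import numpy as np

def preprocess_grad(grad, p=10.0):
    """|∇| >= e^{-p} 时返回 (log|∇|/p, sign(∇)),否则返回 (-1, e^p * ∇)。"""
    grad = np.asarray(grad, dtype=np.float64)
    large = np.abs(grad) >= np.exp(-p)
    with np.errstate(divide="ignore"):          # grad 为 0 时 log 产生 -inf,但会被 where 丢弃
        log_branch = np.log(np.abs(grad)) / p
    first = np.where(large, log_branch, -1.0)   # 第一维 > -1 时,它代表梯度的 log
    second = np.where(large, np.sign(grad), np.exp(p) * grad)
    return np.stack([first, second], axis=-1)

print(preprocess_grad([1e-2, -3.0, 1e-8]))
# 1e-2 和 -3.0 走对数分支;1e-8 < e^{-10},第一维为 -1,第二维是对梯度的缩放
```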
diff --git a/docs/tutorials/meta_learning/model_based_meta_learning/Meta_Learner_LSTM.md b/docs/tutorials/meta_learning/model_based_meta_learning/Meta_Learner_LSTM.md
old mode 100644
new mode 100755
index d8d85252d..4ad850f58
--- a/docs/tutorials/meta_learning/model_based_meta_learning/Meta_Learner_LSTM.md
+++ b/docs/tutorials/meta_learning/model_based_meta_learning/Meta_Learner_LSTM.md
@@ -115,22 +115,22 @@ Meta-Learner 使用 Learner 提供的信息,更新 Learner 中的参数和自
**Meta-Learner LSTM 算法流程**
-> 1. $\Theta_{0}$ $\leftarrow$ random initialization
+> 1. $\Theta_{0}$ $\leftarrow$ random initialization
>
-> 2. for $d=1,...,n$ do:
-> 1. $D_{\mathrm{train}}$, $D_{\mathrm{test}}$ $\leftarrow$ random dataset from ${D}_{\mathrm{meta-train}}$
-> 2. intialize learner parameters: $\theta_{0} \leftarrow c_{0}$
+> 2. for $d=1,...,n$ do:
+> 1. $D_{\mathrm{train}}$, $D_{\mathrm{test}}$ $\leftarrow$ random dataset from ${D}_{\mathrm{meta-train}}$
+> 2. initialize learner parameters: $\theta_{0} \leftarrow c_{0}$
>
> 3. for $t=1,...,T$ do:
-> 1. $\mathbf{X}_{t}$, $\mathbf{Y}_{t}$ $\leftarrow$ random batch from $D_{\mathrm{train}}$
-> 2. get loss of learner on train batch: $\mathcal{L}_{t} \leftarrow \mathcal{L}\left(M\left(\mathbf{X}_{t} ; \theta_{t-1}\right), \mathbf{Y}_{t}\right)$
-> 3. get output of meta-learner using Eq. (2): $c_{t} \leftarrow R\left(\left(\nabla_{\theta_{t-1}} \mathcal{L}_{t}, \mathcal{L}_{t}\right) ; \Theta_{d-1}\right)$
-> 4. update learner parameters: $\theta_{t} \leftarrow c_{t}$
-> 4. end for
+> 1. $\mathbf{X}_{t}$, $\mathbf{Y}_{t}$ $\leftarrow$ random batch from $D_{\mathrm{train}}$
+> 2. get loss of learner on train batch: $\mathcal{L}_{t} \leftarrow \mathcal{L}\left(M\left(\mathbf{X}_{t} ; \theta_{t-1}\right), \mathbf{Y}_{t}\right)$
+> 3. get output of meta-learner using Eq. (2): $c_{t} \leftarrow R\left(\left(\nabla_{\theta_{t-1}} \mathcal{L}_{t}, \mathcal{L}_{t}\right) ; \Theta_{d-1}\right)$
+> 4. update learner parameters: $\theta_{t} \leftarrow c_{t}$
+> 4. end for
>
-> 5. $\mathbf{X}, \mathbf{Y} \leftarrow D_{\mathrm{test}}$
-> 6. get loss of learner on test batch: ${L}_\mathrm{test} \leftarrow {L}\left(M\left(\mathbf{X} ; \theta_{T}\right), \mathbf{Y}\right)$
-> 7. update $\Theta_{d}$ using $\nabla_{\Theta_{d-1}} {L}_{\mathrm{test}}$
+> 5. $\mathbf{X}, \mathbf{Y} \leftarrow D_{\mathrm{test}}$
+> 6. get loss of learner on test batch: ${L}_\mathrm{test} \leftarrow {L}\left(M\left(\mathbf{X} ; \theta_{T}\right), \mathbf{Y}\right)$
+> 7. update $\Theta_{d}$ using $\nabla_{\Theta_{d-1}} {L}_{\mathrm{test}}$
> 3. end for
@@ -159,7 +159,7 @@ Meta-Learner LSTM 是一个两层的 LSTM 网络,第一层是正常的 LSTM
- 在 MAML 中,元学习器使用 SGD 更新参数初始值,使得损失函数中存在高阶导数;
在 Meta-Learner LSTM 中,元学习器给基学习器提供修改的 LSTM 更新参数,元学习器自身的参数并不是基学习器中的参数初始值,元学习器自身的参数使用 SGD 进行更新,并不会出现损失函数高阶导数的计算。
-- 在 MAML 中,元学习器和基学习器只在每个任务训练完成后才进行信息交流;
+- 在 MAML 中,元学习器和基学习器只在每个任务训练完成后才进行信息交流;
在 Meta-Learner LSTM 中,元学习器和基学习器在每个任务的每个批次训练数据完成后就进行信息交流。
- MAML 适用于任意模型结构;
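
为帮助理解算法中 $c_{t} \leftarrow R\left(\left(\nabla_{\theta_{t-1}} \mathcal{L}_{t}, \mathcal{L}_{t}\right) ; \Theta\right)$ 这一步,下面给出一个高度简化的单步更新示意:细胞状态直接充当 Learner 参数,遗忘门与输入门取 (梯度, 损失, 旧参数) 的线性函数。门的这种参数化只是演示假设,并非论文的完整结构:

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def meta_lstm_step(theta_prev, grad, loss, W_f, W_i):
    """一步 c_t = f_t * c_{t-1} + i_t * (-grad),细胞状态 c 即 Learner 参数 theta。"""
    feat = np.stack([grad, np.full_like(grad, loss), theta_prev], axis=-1)  # (n_params, 3)
    f_t = sigmoid(feat @ W_f)        # 遗忘门:决定保留多少旧参数
    i_t = sigmoid(feat @ W_i)        # 输入门:相当于逐参数的自适应学习率
    return f_t * theta_prev + i_t * (-grad)

rng = np.random.default_rng(0)
theta = rng.normal(size=5)           # Learner 的 5 个参数
grad = rng.normal(size=5)            # Learner 回传的梯度
W_f, W_i = rng.normal(size=3), rng.normal(size=3)  # 元学习器参数 Θ 的简化
print(meta_lstm_step(theta, grad, loss=0.8, W_f=W_f, W_i=W_i))
```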
diff --git a/docs/tutorials/meta_learning/model_based_meta_learning/index.rst b/docs/tutorials/meta_learning/model_based_meta_learning/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/meta_learning/optimization_based_meta_learning/LEO.md b/docs/tutorials/meta_learning/optimization_based_meta_learning/LEO.md
old mode 100644
new mode 100755
index cbbe8c139..08ee9ab19
--- a/docs/tutorials/meta_learning/optimization_based_meta_learning/LEO.md
+++ b/docs/tutorials/meta_learning/optimization_based_meta_learning/LEO.md
@@ -12,10 +12,10 @@ LEO 结构如图1所示。

-图1 LEO 结构图。$D^{\mathrm{tr}}$ 是任务 $\varepsilon$ 的 support set,
+图1 LEO 结构图。$D^{\mathrm{tr}}$ 是任务 $\varepsilon$ 的 support set,
$D^{\mathrm{val}}$ 是任务 $\varepsilon$ 的 query set,
-$z$ 是通过编码器计算的 $N$ 个类别的类别特征,$f_{\theta}$ 是基学习器,
-$\theta$ 是基学习器参数,
+$z$ 是通过编码器计算的 $N$ 个类别的类别特征,$f_{\theta}$ 是基学习器,
+$\theta$ 是基学习器参数,
$L^{\mathrm{tr}}=f_{\theta}\left( D^{\mathrm{tr}}\right)$, $L^{\mathrm{val}}=f_{\theta}\left( D^{\mathrm{val}}\right)$。
LEO 包括基础学习器和元学习器,还包括编码器和解码器。
@@ -60,7 +60,7 @@ $$
\end{aligned}
$$
-其中,$N$ 是类别总数, $K$ 是每个类别的图片总数,
+其中,$N$ 是类别总数, $K$ 是每个类别的图片总数,
${D}_{n}^{\mathrm{tr}}$ 是第 $n$ 个类别的训练数据集。
对于每个类别的输入数据,每个类别下有 $K$ 张图片,
计算这 $K$ 张图片和所有已知图片之间的距离。
@@ -69,7 +69,7 @@ ${D}_{n}^{\mathrm{tr}}$ 是第 $n$ 个类别的训练数据集。
### 2.2 解码器
解码器 $g_{\phi_{d}}$ ,其中 $\phi_{d}$ 是解码器的可训练参数,
-其功能是将每个类别输入数据的特征向量 $z_{n}$
+其功能是将每个类别输入数据的特征向量 $z_{n}$
映射成属于每个类别的概率值 $\boldsymbol{w}_{n}$:
$$
@@ -87,7 +87,7 @@ $g_{\phi_{d}}$ 是从特征向量到基础学习器参数的映射。

-图2 LEO 基础学习器工作原理图。
+图2 LEO 基础学习器工作原理图。
### 2.3 基础学习器更新过程
@@ -109,7 +109,7 @@ $$
其中,$\boldsymbol{z}_{n}^{\prime}$ 是更新后的特征向量,
对应的是更新后的任务专属参数 $\boldsymbol{\theta}_{\varepsilon}^{\prime}$。
基础学习器使用 $\theta_{\varepsilon}^{\prime}$ 来预测任务验证集数据的标注,
-将任务 $\varepsilon$ 的验证集 $\mathrm{D}_{\varepsilon}^{\mathrm{val}}$
+将任务 $\varepsilon$ 的验证集 $\mathrm{D}_{\varepsilon}^{\mathrm{val}}$
损失函数 $L_{\varepsilon}^{\mathrm{val}}\left(f_{\theta_{\varepsilon}^{\prime}}\right)$ 、
更新后的特征向量 $z_{n}^{\prime}$、
更新后的任务专属参数 $\theta_{\varepsilon}^{\prime}$ 输入元学习器,
@@ -124,7 +124,7 @@ $$
$$
其中, $L_{\varepsilon}^{\mathrm{val}}\left(f_{\theta_{\varepsilon}^{\prime}}\right)$ 是任务 $\varepsilon$ 验证集的损失函数,
-衡量了基础学习器模型的泛化误差,损失函数越小,模型的泛化能力越好。
+衡量了基础学习器模型的泛化误差,损失函数越小,模型的泛化能力越好。
$p\left(z_{n}\right)=N(0, I)$ 是高斯分布,$D_{\mathrm{KL}}\left\{q\left(z_{n} \mid {D}_{n}^{\mathrm{tr}}\right) \| p\left(z_{n}\right)\right\}$ 是近似后验分布 $q\left(z_{n} \mid D_{n}^{\text {tr }}\right)$ 与先验分布 $p\left(z_{n}\right)$ 之间的 KL 距离 (KL-Divergence),
最小化 $\mathrm{KL}$ 距离可使后验分布 $q\left(z_{n} \mid {D}_{n}^{\text {tr}}\right)$ 的估计尽可能准确。
最小化距离 $\left\|s\left(z_{n}^{\prime}\right)-z_{n}\right\|$ 使得参数初始值 $z_{n}$ 和训练完成后的参数更新值 $z_{n}^{\prime}$ 距离最小,
@@ -137,7 +137,7 @@ $$
其中, $\left\|\phi_{r}\right\|_{2}^{2}$ 指的是调控元参数的个数和大小,
${C}_{d}$ 是参数 $\phi_{d}$ 的行和行之间的相关性矩阵,
- 超参数 $\lambda_{1},\lambda_{2}>0$,
+ 超参数 $\lambda_{1},\lambda_{2}>0$,
$\left\|C_{d}-\mathbb{I}\right\|_{2}$ 使得 $C_{d}$ 接近单位矩阵,
使得参数 $\phi_{d}$ 的行和行之间的相关性不能太大,
每个类别的特征向量之间的相关性不能太大,
@@ -154,24 +154,24 @@ $$
> 1. sample task instance $\mathcal{T}_{i} \sim \mathcal{S}^{t r}$
> 2. let $\left(\mathcal{D}^{t r}, \mathcal{D}^{v a l}\right)=\mathcal{T}_{i}$
> 3. encode $\mathcal{D}^{t r}$ to z using $g_{\phi_{e}}$ and $g_{\phi_{r}}$
-> 4. decode $\mathbf{z}$ to initial params $\theta_{i}$ using $g_{\phi_{d}}$
+> 4. decode $\mathbf{z}$ to initial params $\theta_{i}$ using $g_{\phi_{d}}$
> 5. initialize $\mathbf{z}^{\prime}=\mathbf{z}, \theta_{i}^{\prime}=\theta_{i}$
> 6. for number of adaptation steps do:
> 1. compute training loss $\mathcal{L}_{\mathcal{T}_{i}}^{t r}\left(f_{\theta_{i}^{\prime}}\right)$
> 2. perform gradient step w.r.t. $\mathbf{z}^{\prime}$:
> 3. $\mathbf{z}^{\prime} \leftarrow \mathbf{z}^{\prime}-\alpha \nabla_{\mathbf{z}^{\prime}} \mathcal{L}_{\mathcal{T}_{i}}^{t r}\left(f_{\theta_{i}^{\prime}}\right)$
-> 4. decode $\mathbf{z}^{\prime}$ to obtain $\theta_{i}^{\prime}$ using $g_{\phi_{d}}$
+> 4. decode $\mathbf{z}^{\prime}$ to obtain $\theta_{i}^{\prime}$ using $g_{\phi_{d}}$
> 7. end for
-> 8. compute validation loss $\mathcal{L}_{\mathcal{T}_{i}}^{v a l}\left(f_{\theta_{i}^{\prime}}\right)$
+> 8. compute validation loss $\mathcal{L}_{\mathcal{T}_{i}}^{v a l}\left(f_{\theta_{i}^{\prime}}\right)$
> 2. end for
-> 3. perform gradient step w.r.t $\phi$:$\phi \leftarrow \phi-\eta \nabla_{\phi} \sum_{\mathcal{T}_{i}} \mathcal{L}_{\mathcal{T}_{i}}^{v a l}\left(f_{\theta_{i}^{\prime}}\right)$
+> 3. perform gradient step w.r.t $\phi$:$\phi \leftarrow \phi-\eta \nabla_{\phi} \sum_{\mathcal{T}_{i}} \mathcal{L}_{\mathcal{T}_{i}}^{v a l}\left(f_{\theta_{i}^{\prime}}\right)$
> 4. end while
-(1) 初始化元参数:编码器参数 $\phi_{e}$、关系网络参数 $\phi_{r}$、解码器参数 $\phi_{d}$,
+(1) 初始化元参数:编码器参数 $\phi_{e}$、关系网络参数 $\phi_{r}$、解码器参数 $\phi_{d}$,
在元学习器中更新的元参数包括 $\phi=\left\{\phi_e, \phi_r,\phi_d \right\}$。
(2) 使用片段式训练模式,
-随机抽取任务 $\varepsilon$, ${D}_{\varepsilon}^{\mathrm{tr}}$ 是任务 $\varepsilon$ 的训练集,
+随机抽取任务 $\varepsilon$, ${D}_{\varepsilon}^{\mathrm{tr}}$ 是任务 $\varepsilon$ 的训练集,
${D}_{\varepsilon}^{\mathrm{val}}$ 是任务 $\varepsilon$ 的验证集。
(3) 使用编码器 $g_{\phi_{e}}$ 和关系网络 $g_{\phi_{r}}$ 将任务 $\varepsilon$ 的训练集 $D_{\varepsilon}^{\mathrm{tr}}$ 编码成特征向量 $z$,
@@ -197,7 +197,7 @@ $$
LEO 是一种与模型无关的元学习,[1] 中给出的各部分模型结构及参数如表1所示。
-表1 LEO 各部分模型结构及参数。
+表1 LEO 各部分模型结构及参数。
| Part of the model | Architecture | Hiddenlayer | Shape of the output |
@@ -211,7 +211,7 @@ LEO 是一种与模型无关的元学习,[1] 中给出的各部分模型结构
## 6 LEO 分类结果
-表1 LEO 在 miniImageNet 上的分类结果。
+表2 LEO 在 miniImageNet 上的分类结果。
| Model | 5-way 1-shot | 5-way 5-shot |
@@ -233,7 +233,7 @@ LEO 是一种与模型无关的元学习,[1] 中给出的各部分模型结构
| LEO | **61.76 $\pm$ 0.08** $\%$ | **77.59 $\pm$ 0.12** $\%$ |
-表1 LEO 在 tieredImageNet 上的分类结果。
+表3 LEO 在 tieredImageNet 上的分类结果。
| Model | 5-way 1-shot | 5-way 5-shot |
@@ -257,5 +257,3 @@ LEO 是一种与模型无关的元学习,[1] 中给出的各部分模型结构
## 参考文献
[1] [Meta-Learning with Latent Embedding Optimization](https://openreview.net/forum?id=BJgklhAcK7)
-
-
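
下面用一个玩具例子示意 LEO 内循环的关键点:梯度只在低维隐向量 $z$ 上走,基学习器参数 $\theta$ 始终由解码器从 $z$ 生成。这里用线性解码器和平方损失代替论文中的网络,属于演示假设:

```python
import numpy as np

rng = np.random.default_rng(0)
n, d, latent = 20, 8, 3
X = rng.normal(size=(n, d))               # 任务训练集特征
y = rng.normal(size=n)                    # 任务训练集标签
W_d = rng.normal(size=(d, latent)) / d    # 解码器 g_{phi_d} 的线性简化
z = rng.normal(size=latent)               # 编码器给出的初始隐向量(此处随机代替)

alpha = 0.05
for _ in range(5):                        # 对应 "for number of adaptation steps"
    theta = W_d @ z                       # decode z to params theta
    grad_theta = 2 * X.T @ (X @ theta - y) / n
    grad_z = W_d.T @ grad_theta           # 链式法则:损失对 z 的梯度
    z = z - alpha * grad_z                # 更新只发生在隐空间
theta_adapted = W_d @ z
print(float(np.mean((X @ theta_adapted - y) ** 2)))  # 任务训练损失随内循环下降
```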
diff --git a/docs/tutorials/meta_learning/optimization_based_meta_learning/Reptile.md b/docs/tutorials/meta_learning/optimization_based_meta_learning/Reptile.md
old mode 100644
new mode 100755
index 30d4fecc0..eacff7233
--- a/docs/tutorials/meta_learning/optimization_based_meta_learning/Reptile.md
+++ b/docs/tutorials/meta_learning/optimization_based_meta_learning/Reptile.md
@@ -10,13 +10,13 @@ Reptil 是 MAML 的特例、近似和简化,主要解决 MAML 元学习器中
来近似损失函数对参数初始值的导数,进行参数初始值的更新,从而不会出现任务损失函数的二阶导数。
Reptile 有两个版本:Serial Version 和 Batched Version,两者的差异如下:
-
+
## 1 Serial Version Reptile
单次更新的 Reptile,每次训练完一个任务的基学习器,就更新一次元学习器中的参数初始值。
-(1) 任务上的基学习器记为 $f_{\phi}$ ,其中 $\phi$ 是基学习器中可训练的参数,
+(1) 任务上的基学习器记为 $f_{\phi}$ ,其中 $\phi$ 是基学习器中可训练的参数,
$\theta$ 是元学习器提供给基学习器的参数初始值。
在任务 $T_{i}$ 上,基学习器的损失函数是 $L_{T_{i}}\left(f_{\phi}\right)$ ,
基学习器中的参数经过 $N$ 次迭代更新得到参数估计值:
@@ -92,4 +92,4 @@ $$
## 参考文献
-[1] [Reptile: a Scalable Metalearning Algorithm](https://arxiv.org/abs/1803.02999v1)
\ No newline at end of file
+[1] [Reptile: a Scalable Metalearning Algorithm](https://arxiv.org/abs/1803.02999v1)
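
Serial Version Reptile 的更新规则只需几行代码:在任务上做 $N$ 步 SGD 得到 $\phi$,再让初始值 $\theta$ 朝 $\phi$ 移动一小步。下面的二次型玩具任务为演示假设:

```python
import numpy as np

rng = np.random.default_rng(0)
theta = np.zeros(2)                    # 元学习器维护的参数初始值
epsilon, alpha, inner_steps = 0.1, 0.05, 10

for it in range(200):
    target = rng.normal(size=2)        # 采样任务 T_i:损失为 ||phi - target||^2
    phi = theta.copy()
    for _ in range(inner_steps):       # 基学习器的 N 次迭代
        phi -= alpha * 2 * (phi - target)
    theta += epsilon * (phi - theta)   # Reptile 更新:theta <- theta + eps * (phi - theta)
print(theta)                           # 收敛到任务分布的中心附近(这里约为 0)
```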
diff --git a/docs/tutorials/meta_learning/optimization_based_meta_learning/index.rst b/docs/tutorials/meta_learning/optimization_based_meta_learning/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/meta_learning/preliminaries.md b/docs/tutorials/meta_learning/preliminaries.md
old mode 100644
new mode 100755
index d12747e1c..3eb8f3aa5
--- a/docs/tutorials/meta_learning/preliminaries.md
+++ b/docs/tutorials/meta_learning/preliminaries.md
@@ -15,7 +15,7 @@
元学习的含义有两层,
第一层是让机器学会学习,使其具备分析和解决问题的能力,
-机器通过完成任务获取经验,提高完成任务的能力;
+机器通过完成任务获取经验,提高完成任务的能力;
第二层是让机器学习模型可以更好地泛化到新领域中,
从而完成差异很大的新任务。
@@ -37,7 +37,7 @@ Few-Shot Learning 是 Meta-Learning 在监督学习领域的应用。
遇到的新任务称为元测试任务 (meta-test task)。
每个任务都有自己的训练集和测试集,
内部的训练集和测试集一般称为支持集 (Support Set) 和查询集 (Query Set)。
-支持集又是一个 N-Way K-Shot 问题,即有 N 个类别,每个类有 K 个样例。
+支持集又是一个 N-Way K-Shot 问题,即有 N 个类别,每个类有 K 个样例。

@@ -66,7 +66,7 @@ Few-Shot Learning 是 Meta-Learning 在监督学习领域的应用。

-图3 基学习器和元学习器。元学习器总结任务经验进行任务之间的共性学习,同时指导基学习器对新任务进行特性学习。
+图3 基学习器和元学习器。元学习器总结任务经验进行任务之间的共性学习,同时指导基学习器对新任务进行特性学习。
### 4.1 基学习器
@@ -101,14 +101,14 @@ Few-Shot Learning 是 Meta-Learning 在监督学习领域的应用。
## 5 元学习工作原理
-元学习的主要目的是寻找元学习器 $F$,
+元学习的主要目的是寻找元学习器 $F$,
在 $F$ 的指导下基学习器 $f$ 在支持集 (support set) $D^{\mathrm{tr}}$ 的作用下经过几步微调就可以得到适应当前新任务的最优状态 $f^{*}$。而 $F$ 的优化需要当前所有任务损失的累计和,
即 $\nabla\sum_{n=1}^{N} l \left( f_{n}^{*}, D_{n}^{\mathrm{te}} \right)$。
元学习工作原理如图4所示。

-图4 元学习工作原理。
+图4 元学习工作原理。
### 5.1 元学习训练过程
diff --git a/docs/tutorials/model_compress/index.rst b/docs/tutorials/model_compress/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/model_compress/model_compress.md b/docs/tutorials/model_compress/model_compress.md
old mode 100644
new mode 100755
index e8da875fe..cb7773901
--- a/docs/tutorials/model_compress/model_compress.md
+++ b/docs/tutorials/model_compress/model_compress.md
@@ -6,7 +6,7 @@
* 首先是速度,比如像人脸闸机、人脸解锁手机等应用,对响应速度比较敏感,需要做到实时响应。
* 其次是存储,比如电网周边环境监测这个应用场景中,需要将图像目标检测模型部署在可用内存只有200M的监控设备上,且当监控程序运行后,剩余内存会小于30M。
-* 最后是耗能,离线翻译这种移动设备内置AI模型的能耗直接决定了它的续航能力。
+* 最后是耗能,离线翻译这种移动设备内置AI模型的能耗直接决定了它的续航能力。

@@ -34,4 +34,4 @@
- 蒸馏:类似“老师教学生”,使用一个效果好的大模型指导一个小模型训练,因为大模型可以提供更多的软分类信息量,所以会训练出一个效果接近大模型的小模型。
- 神经网络架构搜索(NAS):类似“化学结构式的重构”,以模型大小和推理速度为约束进行模型结构搜索,从而获得更高效的网络结构。
-除此以外,还有权重共享、低秩分解等技术也可实现模型压缩。
\ No newline at end of file
+除此以外,还有权重共享、低秩分解等技术也可实现模型压缩。
diff --git a/docs/tutorials/model_compress/model_distill/DistilBERT.md b/docs/tutorials/model_compress/model_distill/DistilBERT.md
old mode 100644
new mode 100755
index aa6cf4c15..b222ff728
--- a/docs/tutorials/model_compress/model_distill/DistilBERT.md
+++ b/docs/tutorials/model_compress/model_distill/DistilBERT.md
@@ -45,4 +45,4 @@ $$
图2:在GLUE数据集上的测试结果、下游任务测试和参数量对比
-根据上图我们可以看到,DistilBERT与BERT相比减少了40%的参数,同时保留了BERT 97%的性能,但提高了60%的速度。
\ No newline at end of file
+根据上图我们可以看到,DistilBERT与BERT相比减少了40%的参数,保留了BERT 97%的性能,同时速度提升了60%。
diff --git a/docs/tutorials/model_compress/model_distill/DynaBERT.md b/docs/tutorials/model_compress/model_distill/DynaBERT.md
old mode 100644
new mode 100755
index 297e90a86..0701bed5c
--- a/docs/tutorials/model_compress/model_distill/DynaBERT.md
+++ b/docs/tutorials/model_compress/model_distill/DynaBERT.md
@@ -76,4 +76,3 @@ $$
图4:Comparison of #parameters, FLOPs, latency on GPU and CPU between DynaBERT and DynaRoBERTa and other methods.
可以看到论文中提出的DynaBERT和DynaRoBERTa可以达到和 $BERT_{BASE}$ 及 $RoBERTa_{BASE}$ 相当的精度,但通常包含更少的参数、更少的FLOPs或更低的延迟。在相同效率的约束下,从DynaBERT中提取的子网性能优于DistilBERT和TinyBERT。
-
diff --git a/docs/tutorials/model_compress/model_distill/Patient-KD.md b/docs/tutorials/model_compress/model_distill/Patient-KD.md
old mode 100644
new mode 100755
index 6890b3c8e..b01755bbe
--- a/docs/tutorials/model_compress/model_distill/Patient-KD.md
+++ b/docs/tutorials/model_compress/model_distill/Patient-KD.md
@@ -81,4 +81,3 @@ $$
图5: 参数量和推理时间对比
图5展示了$BERT_3$、$BERT_6$、$BERT_{12}$的推理时间及参数量,实验表明Patient-KD方法实现了几乎线性的加速,$BERT_6$和$BERT_3$分别提速1.94倍和3.73倍。
-
diff --git a/docs/tutorials/model_compress/model_distill/TinyBERT.md b/docs/tutorials/model_compress/model_distill/TinyBERT.md
old mode 100644
new mode 100755
index c2a09a5e8..920f0198c
--- a/docs/tutorials/model_compress/model_distill/TinyBERT.md
+++ b/docs/tutorials/model_compress/model_distill/TinyBERT.md
@@ -97,7 +97,7 @@ $$
$$
\begin{equation}
-L_{layer} =
+L_{layer} =
\left\{
\begin{array}{lr}
L_{embd}, & m=0 \\
@@ -116,4 +116,3 @@ $$
图3: Results evaluated on GLUE benchmark
作者在GLUE基准上评估了TinyBERT的性能,模型大小、推理速度和准确率如图3所示。实验结果表明,TinyBERT在所有GLUE任务上都优于 $BERT_{TINY}$,并在平均性能上获得6.8%的提升。这表明论文中提出的知识蒸馏学习框架可以有效地提升小模型在下游任务中的性能。同时,$TinyBERT_4$ 以~4%的幅度显著地提升了KD SOTA基准线(比如,BERT-PKD和DistilBERT),参数缩小至~28%,推理速度提升3.1倍。与teacher $BERT_{base}$ 相比,TinyBERT在保持良好性能的同时,模型缩小7.5倍,速度提升9.4倍。
-
diff --git a/docs/tutorials/model_compress/model_distill/index.rst b/docs/tutorials/model_compress/model_distill/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/model_deployment/index.rst b/docs/tutorials/model_deployment/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/natural_language_processing/N-Gram.md b/docs/tutorials/natural_language_processing/N-Gram.md
old mode 100644
new mode 100755
index 5d94224e9..95f06494a
--- a/docs/tutorials/natural_language_processing/N-Gram.md
+++ b/docs/tutorials/natural_language_processing/N-Gram.md
@@ -58,6 +58,3 @@ $$p(Sam|am)=0.5$$
$$p(do|I)=0.33$$
等等
-
-
-
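
上面的二元语法概率可以直接用计数复现:$p(w_2|w_1)=\frac{count(w_1 w_2)}{count(w_1)}$。下面假设语料就是前文的三句经典示例(计算结果与上文一致):

```python
from collections import Counter

corpus = ["<s> I am Sam </s>",
          "<s> Sam I am </s>",
          "<s> I do not like green eggs and ham </s>"]
unigram, bigram = Counter(), Counter()
for sent in corpus:
    words = sent.split()
    unigram.update(words)                  # 统计单个词出现次数
    bigram.update(zip(words, words[1:]))   # 统计相邻词对出现次数

def p(w2, w1):
    """极大似然估计的二元语法概率 p(w2|w1)。"""
    return bigram[(w1, w2)] / unigram[w1]

print(p("Sam", "am"))          # 0.5
print(round(p("do", "I"), 2))  # 0.33
```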
diff --git a/docs/tutorials/natural_language_processing/SimCSE.md b/docs/tutorials/natural_language_processing/SimCSE.md
old mode 100644
new mode 100755
index 14570a24f..d4c1f292b
--- a/docs/tutorials/natural_language_processing/SimCSE.md
+++ b/docs/tutorials/natural_language_processing/SimCSE.md
@@ -66,13 +66,13 @@ $l_{i}=-\log\frac{e^{sim(h_{i}^{z_i},h_{i}^{^{z_i^{’}}})/\tau}}{\sum_{j=1}^{N}
相似的问题对为正样本,如下:
```
正样本
-How can I improve my communication and verbal skills?
+How can I improve my communication and verbal skills?
What should we do to improve communication skills?
```
不相似的问题对为负样本,如下:
```
负样本
-Why are you so sexy?
+Why are you so sexy?
How sexy are you?
```
- Flickr30k:每个图像都有5个人进行描述,可以认为同一图像的任意两个描述为一对$(x_{i}, x_{i}^{+})$数据对;
@@ -101,17 +101,17 @@ A man with a yellow tie looks concerned.
自然语言推理数据集,包含蕴含、矛盾和中立三种关系;蕴含表示由第一句话可以推理出第二句话;矛盾表示第二句话与第一句话矛盾;中立表示两句话无关;举例如下:
```
蕴含样本
-well you see that on television also
+well you see that on television also
You can see that on television, as well.
```
```
矛盾样本
-but that takes too much planning
+but that takes too much planning
It doesn't take much planning.
```
```
中立样本
-Conceptually cream skimming has two basic dimensions - product and geography.
+Conceptually cream skimming has two basic dimensions - product and geography.
Product and geography are what make cream skimming work.
```
@@ -163,6 +163,3 @@ SimCSE论文,在无监督句向量表征上,通过简单的dropout方式,
## 8.参考文献
- [SimCSE: Simple Contrastive Learning of Sentence Embeddings](https://aclanthology.org/2021.emnlp-main.552.pdf)
-
-
-
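
上面的损失 $l_i$ 可以用 numpy 直接写出。下面是批内(in-batch)对比损失的极简示意,其中随机向量只是用来代替同一批句子两次过带 dropout 的编码器得到的两组句向量:

```python
import numpy as np

def simcse_loss(h, h_pos, tau=0.05):
    """h, h_pos: (N, d),同一批句子两次前向的表示;对角线为正例,其余为批内负例。"""
    h = h / np.linalg.norm(h, axis=1, keepdims=True)
    h_pos = h_pos / np.linalg.norm(h_pos, axis=1, keepdims=True)
    sim = h @ h_pos.T / tau                       # 余弦相似度除以温度 tau
    sim = sim - sim.max(axis=1, keepdims=True)    # 数值稳定
    log_softmax = sim - np.log(np.exp(sim).sum(axis=1, keepdims=True))
    return float(-np.mean(np.diag(log_softmax)))  # 对 l_i 取批内平均

rng = np.random.default_rng(0)
h = rng.normal(size=(8, 16))
h_pos = h + 0.1 * rng.normal(size=(8, 16))        # 模拟 dropout 带来的轻微扰动
print(simcse_loss(h, h_pos))
```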
diff --git a/docs/tutorials/natural_language_processing/index.rst b/docs/tutorials/natural_language_processing/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/natural_language_processing/ner/bilstm_crf.md b/docs/tutorials/natural_language_processing/ner/bilstm_crf.md
old mode 100644
new mode 100755
index 3a269070b..00890db4f
--- a/docs/tutorials/natural_language_processing/ner/bilstm_crf.md
+++ b/docs/tutorials/natural_language_processing/ner/bilstm_crf.md
@@ -119,7 +119,7 @@ $Seq_t = T_{I-Person,B-Person} + T_{O,I-Person} + T_{O,O} + T_{O,B-Organization
**图5**展示了CRF的工作图,现在我们有一串输入$x=[x_0, x_1, x_2, ..., x_n]$(这里的$x$是文本串对应的发射分数,每个字词$x_i$都对应着一个发射分数向量,也就是前边提到的标签向量,该向量的维度就是标签数量),期待解码出相应的标签序列$y=[y_0, y_1, y_2, ..., y_n]$,形式化为对应的条件概率公式如下:
-$$(y|x) = P(y_0,y_1,...,y_n|x_0,x_1,...,x_n)$$
+$$P(y|x) = P(y_0,y_1,...,y_n|x_0,x_1,...,x_n)$$
在第2节我们提到,CRF的解码策略在所有可能的路径中,找出得出概率最大,效果最优的一条路径,那这个标签序列就是模型的输出,假设标签数量是$k$,文本长度是$n$,显然会有$N=k^n$条路径,若用$S_i$代表第$i$条路径的分数,那我们可以这样去算一个标签序列出现的概率:
@@ -275,7 +275,7 @@ $$
$$
\begin{align}
-alpha_2 &= [log(e^{scores[0,0]} + e^{scores[0,1]}), log(e^{scores[1,0]} + e^{scores[1,1]})] \\ &=[log(e^{log(e^{x_{00}+t_{00}+x_{10}}+e^{x_{01}+t_{01}+x_{10}})+t_{00}+x_{20}} + e^{log(e^{x_{00}+t_{10}+x_{11}}+e^{x_{01}+t_{11}+x_{11}})+t_{01}+x_{20}}), log(e^{log(e^{x_{00}+t_{00}+x_{10}}+e^{x_{01}+t_{01}+x_{10}})+t_{10}+x_{21}} + e^{log(e^{x_{00}+t_{10}+x_{11}}+e^{x_{01}+t_{11}+x_{11}})+t_{11}+x_{21}})] \\ &=[log((e^{x_{00}+t_{00}+x_{10}}+e^{x_{01}+t_{01}+x_{10}})e^{t_{00}+x_{20}}+(e^{x_{00}+t_{10}+x_{11}}+e^{x_{01}+t_{11}+x_{11}})e^{t_{01}+x_{20}}), log((e^{x_{00}+t_{00}+x_{10}}+e^{x_{01}+t_{01}+x_{10}})e^{t_{10}+x_{21}}+(e^{x_{00}+t_{10}+x_{11}}+e^{x_{01}+t_{11}+x_{11}})e^{t_{11}+x_{21}})]
+alpha_2 &= [log(e^{scores[0,0]} + e^{scores[0,1]}), log(e^{scores[1,0]} + e^{scores[1,1]})] \\ &=[log(e^{log(e^{x_{00}+t_{00}+x_{10}}+e^{x_{01}+t_{01}+x_{10}})+t_{00}+x_{20}} + e^{log(e^{x_{00}+t_{10}+x_{11}}+e^{x_{01}+t_{11}+x_{11}})+t_{01}+x_{20}}), log(e^{log(e^{x_{00}+t_{00}+x_{10}}+e^{x_{01}+t_{01}+x_{10}})+t_{10}+x_{21}} + e^{log(e^{x_{00}+t_{10}+x_{11}}+e^{x_{01}+t_{11}+x_{11}})+t_{11}+x_{21}})] \\ &=[log((e^{x_{00}+t_{00}+x_{10}}+e^{x_{01}+t_{01}+x_{10}})e^{t_{00}+x_{20}}+(e^{x_{00}+t_{10}+x_{11}}+e^{x_{01}+t_{11}+x_{11}})e^{t_{01}+x_{20}}), log((e^{x_{00}+t_{00}+x_{10}}+e^{x_{01}+t_{01}+x_{10}})e^{t_{10}+x_{21}}+(e^{x_{00}+t_{10}+x_{11}}+e^{x_{01}+t_{11}+x_{11}})e^{t_{11}+x_{21}})]
\end{align}
$$
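
上面 $alpha$ 的递推本质上是对「上一步累计分数 + 转移分数 + 发射分数」做 log-sum-exp。下面给出一个数值稳定的前向算法示意,玩具分数为演示假设:

```python
import numpy as np

def crf_log_partition(emissions, transitions):
    """返回所有路径分数的 log-sum-exp,即 log Z。
    emissions: (seq_len, n_tags) 发射分数;transitions[i, j]: 标签 i 转移到 j 的分数。"""
    alpha = emissions[0]                                    # 对应 alpha_0
    for t in range(1, len(emissions)):
        # scores[i, j] = alpha[i] + t_{ij} + x_{t,j},与上文的展开式一致
        scores = alpha[:, None] + transitions + emissions[t][None, :]
        m = scores.max(axis=0)
        alpha = m + np.log(np.exp(scores - m).sum(axis=0))  # 按列做 logsumexp
    m = alpha.max()
    return float(m + np.log(np.exp(alpha - m).sum()))

x = np.array([[0.3, 0.2], [0.5, 0.1], [0.7, 0.4]])  # 2 个标签、长度为 3 的发射分数
T = np.array([[0.1, 0.3], [0.2, 0.4]])              # 转移分数
print(crf_log_partition(x, T))
```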
diff --git a/docs/tutorials/natural_language_processing/ner/index.rst b/docs/tutorials/natural_language_processing/ner/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/natural_language_processing/ner/ner_description.md b/docs/tutorials/natural_language_processing/ner/ner_description.md
old mode 100644
new mode 100755
index 72cc78c63..70063b4ab
--- a/docs/tutorials/natural_language_processing/ner/ner_description.md
+++ b/docs/tutorials/natural_language_processing/ner/ner_description.md
@@ -10,13 +10,13 @@
命名实体识别是一项比较关键的NLP任务,具有广泛的**应用场景**,例如在对话意图理解(NLU)中,通过提取出相应的实体词,能够帮助系统更加准确地理解用户的需求,比如根据用户的问题提取出"天气","北京","今天"这样的词汇,大概率就能知道用户在问些什么;在微博场景中,应用命名实体识别提取出微博短文中重要的实体词,也有利于微博信息的汇总,或者事件热度的统计。
-NER任务一般会被建模成序列标注任务,也就是说,模型的输入是待识别的一串文本序列,模型的输出就是该文本序列对应的标签序列,不同于文本分类任务,这是一种序列到序列的任务。我们来举个例子:
+NER任务一般会被建模成序列标注任务,也就是说,模型的输入是待识别的一串文本序列,模型的输出就是该文本序列对应的标签序列,不同于文本分类任务,这是一种序列到序列的任务。我们来举个例子:
| 姚 | 明 | 担 | 任 | 中 | 国 | 篮 | 协 | 主 | 席 |
| -------- | -------- | ---- | ---- | -------------- | -------------- | -------------- | -------------- | ---- | ---- |
| B-Person | I-Person | O | O | B-Organization | I-Organization | I-Organization | I-Organization | O | O |
-这句话中的每个字分别对应着一个标签, 模型的输入就是上边的文本,模型的输出就是下面的标签序列,我们通过这样的标签序列就能识别出原始文本中的实体。
+这句话中的每个字分别对应着一个标签, 模型的输入就是上边的文本,模型的输出就是下面的标签序列,我们通过这样的标签序列就能识别出原始文本中的实体。
具体地,上边这串文本中,"姚明"对应着Person实体,其中"姚"字是"Person"实体的起始字,所以设置标签为"B-Person",其中标签前边的B代表Begin这个单词;"明"字是"Person"实体的中间字,所以设置标签为"I-Person",其中标签前边的I代表Intermediate这个单词。"中国篮协"对应着Organization实体,相应标签"B-Organization"和"I-Organization"的解读和Person实体是一致的。最后的标签"O"代表"other",表示其他实体类型的标签。
看到这里,相信你已经知道,本节的NER任务要建模完成一件什么事情了,即建模一个序列到序列的模型来找出文本中蕴含的实体。
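
按照上面的 BIO 标注约定,可以用几行代码把标签序列还原成实体片段:

```python
def bio_decode(chars, tags):
    """按 B-X / I-X / O 标签序列抽取 (实体文本, 实体类型) 片段。"""
    entities, buf, cur_type = [], [], None
    for ch, tag in zip(chars, tags):
        if tag.startswith("B-"):                       # 新实体开始
            if buf:
                entities.append(("".join(buf), cur_type))
            buf, cur_type = [ch], tag[2:]
        elif tag.startswith("I-") and cur_type == tag[2:]:
            buf.append(ch)                             # 实体延续
        else:                                          # "O" 或标签不连贯
            if buf:
                entities.append(("".join(buf), cur_type))
            buf, cur_type = [], None
    if buf:
        entities.append(("".join(buf), cur_type))
    return entities

chars = list("姚明担任中国篮协主席")
tags = ["B-Person", "I-Person", "O", "O", "B-Organization",
        "I-Organization", "I-Organization", "I-Organization", "O", "O"]
print(bio_decode(chars, tags))  # [('姚明', 'Person'), ('中国篮协', 'Organization')]
```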
diff --git a/docs/tutorials/pretrain_model/ALBERT.md b/docs/tutorials/pretrain_model/ALBERT.md
old mode 100644
new mode 100755
index 85183a2a5..9c5196cd2
--- a/docs/tutorials/pretrain_model/ALBERT.md
+++ b/docs/tutorials/pretrain_model/ALBERT.md
@@ -31,7 +31,7 @@ ALBERT 架构的主干和 BERT 类似,都使用了基于 GELU 的非线性激

-之所以可以这样做是因为每次反向传播时都只会更新一个 Token 相关参数,其他参数都不会变。而且在第一次投影的过程中,词与词之间是不会进行交互的,只有在后面的 Attention 过程中才会做交互,我们称为 Sparsely updated。如果词不做交互的话,完全没有必要用一个很高维度的向量去表示,所以就引入一个小的隐藏层。
+之所以可以这样做是因为每次反向传播时都只会更新一个 Token 相关参数,其他参数都不会变。而且在第一次投影的过程中,词与词之间是不会进行交互的,只有在后面的 Attention 过程中才会做交互,我们称为 Sparsely updated。如果词不做交互的话,完全没有必要用一个很高维度的向量去表示,所以就引入一个小的隐藏层。
### Cross-layer parameter sharing
@@ -72,5 +72,3 @@ SOP的正例选取方式与BERT一致(来自同一文档的两个连续段)
## No Dropout
RoBERTa 指出 BERT 一系列模型都是“欠拟合”的,所以干脆直接关掉 dropout。在 ALBERT 中,去掉 Dropout 层也可以显著减少临时变量对内存的占用。同时论文发现,Dropout 会损害大型 Transformer-based 模型的性能。
-
-
diff --git a/docs/tutorials/pretrain_model/ELECTRA.md b/docs/tutorials/pretrain_model/ELECTRA.md
old mode 100644
new mode 100755
index 3e93d193f..482caa321
--- a/docs/tutorials/pretrain_model/ELECTRA.md
+++ b/docs/tutorials/pretrain_model/ELECTRA.md
@@ -104,10 +104,3 @@ ELECTRA的RTD任务比MLM的预训练任务好,推出了一种十分适用于N
## 7. 参考文献
[Electra: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555)
-
-
-
-
-
-
-
diff --git a/docs/tutorials/pretrain_model/ELMo.md b/docs/tutorials/pretrain_model/ELMo.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/pretrain_model/ERNIE-Doc.md b/docs/tutorials/pretrain_model/ERNIE-Doc.md
old mode 100644
new mode 100755
index 77c0c74fa..6c7f2582a
--- a/docs/tutorials/pretrain_model/ERNIE-Doc.md
+++ b/docs/tutorials/pretrain_model/ERNIE-Doc.md
@@ -46,7 +46,7 @@ $$
\begin{align}
\hat{H} &= \left[ \hat{H}_{1:T}^{1} \circ \hat{H}_{1:T}^{2} \cdot \cdot \cdot \circ \; \hat{H}_{1:T}^N \right], \quad \text{(skimming phase)} \\
\hat{H}_{1:T}^{i} &= \left[ \hat{h}_1^i \circ \hat{h}_2^i \cdot \cdot \cdot \circ \;\hat{h}_T^i \right] , \quad \text{(skimming phase)} \\
-\tilde{h}_{\tau+1}^{n-1} &= \left[{SG}(\hat{H }\circ h_{\tau}^{n-1}) \circ h_{\tau+1}^{n-1} \right], \quad \text{(retrospective phase)}
+\tilde{h}_{\tau+1}^{n-1} &= \left[{SG}(\hat{H }\circ h_{\tau}^{n-1}) \circ h_{\tau+1}^{n-1} \right], \quad \text{(retrospective phase)}
\end{align}
$$
diff --git a/docs/tutorials/pretrain_model/ERNIE-Gram.md b/docs/tutorials/pretrain_model/ERNIE-Gram.md
old mode 100644
new mode 100755
index 731ca136e..5642925a6
--- a/docs/tutorials/pretrain_model/ERNIE-Gram.md
+++ b/docs/tutorials/pretrain_model/ERNIE-Gram.md
@@ -33,7 +33,7 @@ ERNIE-Gram指出一种观点:这种连续的粗粒度Masking策略会忽略信
- $z_{\backslash M}=\{x_1, \text{[M]}, \text{[M]}, x_4, \text{[M]}, x_6\}$
-
+
Contiguously MLM 可通过如下方式进行优化:
@@ -118,4 +118,3 @@ $$
1. [ERNIE-Gram: Pre-training with Explicitly N-Gram Masked language Modeling for Natural Language Understanding](https://arxiv.org/pdf/2010.12148.pdf)
2. [ERNIE-Gram github](https://github.com/PaddlePaddle/ERNIE/tree/develop/ernie-gram)
-
diff --git a/docs/tutorials/pretrain_model/ERNIE3.md b/docs/tutorials/pretrain_model/ERNIE3.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/pretrain_model/GPT.md b/docs/tutorials/pretrain_model/GPT.md
old mode 100644
new mode 100755
index a472494b0..e59ca5ad5
--- a/docs/tutorials/pretrain_model/GPT.md
+++ b/docs/tutorials/pretrain_model/GPT.md
@@ -87,7 +87,7 @@ $$L_{2}(C)=\sum_{x,y}log P(y|x^1,..,x^m)$$
正常来说,我们应该调整参数使得$L_{2}$最大,但是为了提高训练速度和模型的泛化能力,我们使用 Multi-Task Learning:GPT 在微调的时候也考虑预训练的损失函数,同时最大化$L_{1}$和$L_{2}$
-$$L_{3}(C)=L_{2}(C)+\lambda \times L_{1}(C) $$
+$$L_{3}(C)=L_{2}(C)+\lambda \times L_{1}(C) $$
这里使用的$L_{1}$还是之前语言模型的损失(似然),但是使用的数据不是前面无监督的数据$U$,而是使用当前任务的数据$C$,而且只使用其中的$X$,而不需要标签y。
@@ -100,7 +100,7 @@ $$L_{3}(C)=L_{2}(C)+\lambda \times L_{1}(C) $$
+ Classification:对于分类问题,不需要做什么修改
+ Entailment:对于推理问题,可以将先验与假设使用一个分隔符分开
+ Similarity:对于相似度问题,由于模型是单向的,但相似度与顺序无关,所以要将两个句子顺序颠倒后,把两次输入的结果相加来做最后的推测
-+ Multiple-Choice:对于问答问题,则是将上下文、问题放在一起与答案分隔开,然后进行预测
++ Multiple-Choice:对于问答问题,则是将上下文、问题放在一起与答案分隔开,然后进行预测
## 4. GPT特点
@@ -123,6 +123,3 @@ GPT 与 ELMo 有两个主要的区别:
## 参考文献
[Improving Language Understanding by Generative Pre-Training](https://www.semanticscholar.org/paper/Improving-Language-Understanding-by-Generative-Radford-Narasimhan/cd18800a0fe0b668a1cc19f2ec95b5003d0a5035)
-
-
-
diff --git a/docs/tutorials/pretrain_model/KBERT.md b/docs/tutorials/pretrain_model/KBERT.md
old mode 100644
new mode 100755
index a1e3de7e4..f4d907bea
--- a/docs/tutorials/pretrain_model/KBERT.md
+++ b/docs/tutorials/pretrain_model/KBERT.md
@@ -72,4 +72,3 @@ $$
1. [KBERT: Enabling Language Representation with Knowledge Graph](https://arxiv.org/pdf/1909.07606v1.pdf)
2. [KBERT Github](https://github.com/autoliuweijie/K-BERT)
-
diff --git a/docs/tutorials/pretrain_model/Performer.md b/docs/tutorials/pretrain_model/Performer.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/pretrain_model/RoBERTa.md b/docs/tutorials/pretrain_model/RoBERTa.md
old mode 100644
new mode 100755
index 73fac322b..c9885d35f
--- a/docs/tutorials/pretrain_model/RoBERTa.md
+++ b/docs/tutorials/pretrain_model/RoBERTa.md
@@ -35,7 +35,7 @@ FULL-SENTENCES表示从一篇文章或者多篇文章中连续抽取句子,填
## 3. Larger Batch Size
-RoBERTa通过增加训练过程中Batch Size的大小,来观看模型的在预训练任务和down-stream任务上的表现。发现增加Batch Size有利于降低保留的训练数据的Perplexity,提高down-stream的指标。
+RoBERTa通过增加训练过程中Batch Size的大小,来观察模型在预训练任务和down-stream任务上的表现。发现增加Batch Size有利于降低保留的训练数据的Perplexity,提高down-stream的指标。

diff --git a/docs/tutorials/pretrain_model/SpanBERT.md b/docs/tutorials/pretrain_model/SpanBERT.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/pretrain_model/THU-ERNIE.md b/docs/tutorials/pretrain_model/THU-ERNIE.md
old mode 100644
new mode 100755
index 5f0dd85ed..e3fd3f1bc
--- a/docs/tutorials/pretrain_model/THU-ERNIE.md
+++ b/docs/tutorials/pretrain_model/THU-ERNIE.md
@@ -34,7 +34,7 @@
$$
\{\tilde{w}_{1}^{(i-1)},\tilde{w}_{2}^{(i-1)},...,\tilde{w}_{n}^{(i-1)}\} = \text{MH-ATT}(\{w_{1}^{(i-1)},w_{2}^{(i-1)},...,w_{n}^{(i-1)}\}) \\
-\{\tilde{e}_{1}^{(i-1)},\tilde{e}_{2}^{(i-1)},...,\tilde{e}_{m}^{(i-1)}\} = \text{MH-ATT}(\{e_{1}^{(i-1)},e_{2}^{(i-1)},...,e_{m}^{(i-1)}\})
+\{\tilde{e}_{1}^{(i-1)},\tilde{e}_{2}^{(i-1)},...,\tilde{e}_{m}^{(i-1)}\} = \text{MH-ATT}(\{e_{1}^{(i-1)},e_{2}^{(i-1)},...,e_{m}^{(i-1)}\})
$$
然后Entity序列的输出将被对齐到token序列的第一个token上,例如实体"bob dylan"将被对齐到第一个单词"bob"上。接下来将这些MH-ATT的输出输入到Fusion层,在这里将进行文本信息和KG知识的信息融合。因为有些token没有对应的entity,有些token有对应的entity,所以这里需要分两种情况讨论。
@@ -43,7 +43,7 @@ $$
$$
h_j = \sigma(\tilde{W}_t^{(i)}\tilde{w}_j^{(i)}+\tilde{W}_e^{(i)}\tilde{e}_k^{(i)}+\tilde{b}^{(i)}) \\
-w_j^{(i)} = \sigma({W}_t^{(i)}{h}_j+b_t^{(i)}) \\
+w_j^{(i)} = \sigma({W}_t^{(i)}{h}_j+b_t^{(i)}) \\
e_k^{(i)} = \sigma({W}_e^{(i)}{h}_j+b_e^{(i)})
$$
@@ -52,7 +52,7 @@ $$
$$
h_j = \sigma(\tilde{W}_t^{(i)}\tilde{w}_j^{(i)}+\tilde{b}^{(i)}) \\
-w_j^{(i)} = \sigma({W}_t^{(i)}{h}_j+b_t^{(i)})
+w_j^{(i)} = \sigma({W}_t^{(i)}{h}_j+b_t^{(i)})
$$
其中这里的$\sigma(\cdot)$是个非线性的激活函数,通常可以使用GELU函数。最后一层的输出将被视作融合文本信息和KG知识的最终向量。
@@ -81,4 +81,3 @@ $$
1. [ERNIE:Enhanced Language Representation with Informative Entities](https://arxiv.org/pdf/1905.07129.pdf)
2. [ERNIE Githut](https://github.com/thunlp/ERNIE)
-
diff --git a/docs/tutorials/pretrain_model/Transformer-XL.md b/docs/tutorials/pretrain_model/Transformer-XL.md
old mode 100644
new mode 100755
index 8583b6826..14bc2e29a
--- a/docs/tutorials/pretrain_model/Transformer-XL.md
+++ b/docs/tutorials/pretrain_model/Transformer-XL.md
@@ -53,7 +53,7 @@ $$
$$
h_{\tau+1} = f(h_{\tau},\; E_{\text{s}_{\tau+1}}+U_{1:L}) \\
-h_{\tau} = f(h_{\tau-1},\; E_{\text{s}_{\tau}}+U_{1:L})
+h_{\tau} = f(h_{\tau-1},\; E_{\text{s}_{\tau}}+U_{1:L})
$$
很明显,如果按照这个方式计算,前后两个段$E_{\text{s}_{\tau}}$和$E_{\text{s}_{\tau+1}}$将具有相同的位置编码,这样两者信息融合的时候肯定会造成位置信息混乱。为了避免这个问题,**Transformer-XL**使用了**相对位置编码**。
diff --git a/docs/tutorials/pretrain_model/XLNet.md b/docs/tutorials/pretrain_model/XLNet.md
old mode 100644
new mode 100755
index 75c46147e..1daa00504
--- a/docs/tutorials/pretrain_model/XLNet.md
+++ b/docs/tutorials/pretrain_model/XLNet.md
@@ -169,7 +169,7 @@ $$
$$
\begin{align}
-\mathop{max}_{\theta} \quad \mathbb{E}_{\text{z}∼\mathbb{Z}} log \, p_{\theta}(\text{x}_{z_{>c}}|\text{x}_{\leq c}) = \mathop{max}_{\theta}\quad \mathbb{E}_{\text{z}∼\mathbb{Z}} \left[ \sum_{t=c+1}^n log\;p_{\theta}(x_{z_t}|\text{x}_{\text{z}_{<t}}) \right]
+\mathop{max}_{\theta} \quad \mathbb{E}_{\text{z}∼\mathbb{Z}} log \, p_{\theta}(\text{x}_{z_{>c}}|\text{x}_{\leq c}) = \mathop{max}_{\theta}\quad \mathbb{E}_{\text{z}∼\mathbb{Z}} \left[ \sum_{t=c+1}^n log\;p_{\theta}(x_{z_t}|\text{x}_{\text{z}_{<t}}) \right]
\end{align}
$$
图3 BERT 用于不同的 NLP 任务
@@ -237,4 +237,3 @@ BERT使用的是双向的Transformer,OpenAI GPT使用的是从左到右的Tran
+ 模型参数太多,而且模型太大,少量数据训练时,容易过拟合。
+ BERT的NSP任务效果不明显,MLM存在和下游任务mismatch的情况。
+ BERT对生成式任务和长序列建模支持不好。
-
diff --git a/docs/tutorials/pretrain_model/erine.md b/docs/tutorials/pretrain_model/erine.md
old mode 100644
new mode 100755
index 44cf84ee8..cb4925cc4
--- a/docs/tutorials/pretrain_model/erine.md
+++ b/docs/tutorials/pretrain_model/erine.md
@@ -34,4 +34,3 @@ Next Sentence Prediction(NSP)的任务是判断连个句子是否是具有
除了上边的Knowledge Masking外,ERNIE还采用多个**异源语料**帮助模型训练,例如对话数据,新闻数据,百科数据等等。通过这些改进以保证模型在字词、语句和语义方面更深入地学习到语言知识。当ERNIE通过这些预训练任务学习之后,就会变成一个更懂语言知识的预训练模型,接下来,就可以应用ERNIE在不同的**下游任务**进行微调,提高下游任务的效果。例如,文本分类任务。
> **异源语料** :来自不同源头的数据,比如百度贴吧,百度新闻,维基百科等等
-
diff --git a/docs/tutorials/pretrain_model/index.rst b/docs/tutorials/pretrain_model/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/pretrain_model/longformer.md b/docs/tutorials/pretrain_model/longformer.md
old mode 100644
new mode 100755
index 8f9b98746..f5c21bc68
--- a/docs/tutorials/pretrain_model/longformer.md
+++ b/docs/tutorials/pretrain_model/longformer.md
@@ -63,4 +63,3 @@ $$
1. [Longformer: The Long-Document Transformer](https://arxiv.org/pdf/2004.05150.pdf)
2. [Longformer Github](https://github.com/allenai/longformer)
-
diff --git a/docs/tutorials/pretrain_model/pretrain_model_description.md b/docs/tutorials/pretrain_model/pretrain_model_description.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/pretrain_model/subword.md b/docs/tutorials/pretrain_model/subword.md
old mode 100644
new mode 100755
index d906201fc..9321a8aa5
--- a/docs/tutorials/pretrain_model/subword.md
+++ b/docs/tutorials/pretrain_model/subword.md
@@ -104,7 +104,7 @@ BPE获得Subword的步骤如下:
+ +1,表明加入合并后的新子词,同时原来的2个子词还保留(2个子词分开出现在语料中)。
+ +0,表明加入合并后的新子词,同时原来的2个子词中一个保留,一个被消解(一个子词完全随着另一个子词的出现而紧跟着出现)。
-+ -1,表明加入合并后的新子词,同时原来的2个子词都被消解(2个子词同时连续出现)。
++ -1,表明加入合并后的新子词,同时原来的2个子词都被消解(2个子词同时连续出现)。
实际上,随着合并的次数增加,词表大小通常先增加后减小。
@@ -182,7 +182,3 @@ $$L=\sum_{s=1}^{|D|}log(P(X^{(s)}))=\sum_{s=1}^{|D|}log(\sum_{x \in U(X^{(s)})}P
## 三种子词分词器的关系

-
-
-
-
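
BPE 的合并过程可以用计数直接示意:每轮统计相邻子词对的频率,把频率最高的一对合并成新子词。下面的玩具词表(low/lower/newest/widest)是演示假设:

```python
from collections import Counter

def get_pair_stats(vocab):
    """统计词表中所有相邻子词对的频率。vocab: {子词元组: 词频}"""
    pairs = Counter()
    for word, freq in vocab.items():
        for i in range(len(word) - 1):
            pairs[(word[i], word[i + 1])] += freq
    return pairs

def merge_pair(pair, vocab):
    """把指定的子词对在所有词中合并成一个新子词。"""
    new_vocab = {}
    for word, freq in vocab.items():
        w, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == pair:
                w.append(word[i] + word[i + 1])
                i += 2
            else:
                w.append(word[i])
                i += 1
        new_vocab[tuple(w)] = freq
    return new_vocab

vocab = {tuple("low") + ("</w>",): 5, tuple("lower") + ("</w>",): 2,
         tuple("newest") + ("</w>",): 6, tuple("widest") + ("</w>",): 3}
for _ in range(3):
    stats = get_pair_stats(vocab)
    best = max(stats, key=stats.get)
    vocab = merge_pair(best, vocab)
    print(best)   # 依次合并出 ('e','s')、('es','t')、('est','</w>')
```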
diff --git a/docs/tutorials/pretrain_model/transformer.md b/docs/tutorials/pretrain_model/transformer.md
old mode 100644
new mode 100755
index cb6cf1464..8a962adb5
--- a/docs/tutorials/pretrain_model/transformer.md
+++ b/docs/tutorials/pretrain_model/transformer.md
@@ -11,13 +11,13 @@ Transformer改进了RNN被人诟病的训练慢的特点,利用self-attention
## 2.Transformer直观认识
Transformer主要由encoder和decoder两部分组成。在Transformer的论文中,encoder和decoder分别由6个encoder layer和6个decoder layer堆叠而成,每一层通常称为一个encoder/decoder block。
-
+
transformer结构
每一个encoder和decoder的内部简版结构如下图
-
+
transformer的encoder或者decoder的内部结构
@@ -30,7 +30,7 @@ decoder也包含encoder提到的两层网络,但是在这两层中间还有一
首先,模型需要对输入的数据进行一个embedding操作,embedding结束之后,输入到encoder层,self-attention处理完数据后把数据送给前馈神经网络,前馈神经网络的计算可以并行,得到的输出会输入到下一个encoder。
-
+
embedding和self-attention
@@ -95,30 +95,30 @@ $$X_{hidden}=LayerNorm(X_{hidden})$$
### 3.2.1 自注意力机制
- 首先,自注意力机制(self-attention)会计算出三个新的向量,在论文中,向量的维度是64维,我们把这三个向量分别称为Query、Key、Value,这三个向量是用embedding向量与一个矩阵相乘得到的结果,这个矩阵是随机初始化的,维度为(64,512),注意第二个维度需要和embedding的维度一样,其值在反向传播的过程中会一直进行更新,得到的这三个向量的维度是64,低于embedding的维度。
-
+
Query Key Value
2、计算self-attention的分数值,该分数值决定了当我们在某个位置encode一个词时,对输入句子的其他部分的关注程度。这个分数值的计算方法是Query与Key做点乘,以下图为例,首先我们需要针对Thinking这个词,计算出其他词对于该词的一个分数值,首先是针对于自己本身即q1·k1,然后是针对于第二个词即q1·k2
-
+
Query Key Value
3、接下来,把点乘的结果除以一个常数,这里我们除以8,这个值一般是采用上文提到的矩阵的第一个维度的开方即64的开方8,当然也可以选择其他的值,然后把得到的结果做一个softmax的计算。得到的结果即是每个词对于当前位置的词的相关性大小,当然,当前位置的词相关性肯定会很大
-
+
softmax
4、下一步就是把Value和softmax得到的值进行相乘,并相加,得到的结果即是self-attention在当前节点的值。
-
+
dot product
在实际的应用场景,为了提高计算速度,我们采用的是矩阵的方式,直接计算出Query, Key, Value的矩阵,然后把embedding的值与三个矩阵直接相乘,把得到的新矩阵Q与K相乘,乘以一个常数,做softmax操作,最后乘上V矩阵
-
+
scaled dot product attention
@@ -151,12 +151,12 @@ softmax就是直接计算了,时间复杂度为: $O(n^2)$
不仅仅只初始化一组Q、K、V的矩阵,而是初始化多组,transformer是使用了8组,所以最后得到的结果是8个矩阵。
-
+
multi-head attention
multi-head注意力的全过程如下,首先输入句子,“Thinking Machines”,在embedding模块把句子中的每个单词变成向量X,在encoder层中,除了第0层有embedding操作外,其他的层没有embedding操作;接着把X分成8个head,
-
+
multi-head attention总体结构
@@ -222,7 +222,7 @@ $$FFN(x)=ReLU(W_{1}x+b_{1})W_{2}+b_{2}$$
和 Encoder 一样,上面三个部分的每一个部分,都有一个残差连接,后接一个 Layer Normalization。Decoder 的中间部件并不复杂,大部分在前面 Encoder 里我们已经介绍过了,但是 Decoder 由于其特殊的功能,因此在训练时会涉及到一些细节,下面会介绍Decoder的Masked Self-Attention和Encoder-Decoder Attention两部分,其结构图如下图所示
-
+
decoder self attention
@@ -237,7 +237,7 @@ Mask 非常简单,首先生成一个下三角全 0,上三角全为负无穷
### 3.3.2 Masked Encoder-Decoder Attention
其实这一部分的计算流程和前面 Masked Self-Attention 很相似,结构也一模一样,唯一不同的是这里的K,V为 Encoder 的输出,Q为 Decoder 中 Masked Self-Attention 的输出
-
+
Masked Encoder-Decoder Attention
@@ -284,6 +284,3 @@ Embedding层可以说是通过onehot去取到对应的embedding向量,FC层可
## 5. 参考文献
[Attention Is All You Need](https://arxiv.org/abs/1706.03762)
-
-
-
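
上面第 1~4 步描述的缩放点积注意力用矩阵形式只需几行。随机初始化的投影矩阵仅为演示假设:

```python
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def scaled_dot_product_attention(Q, K, V):
    """Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V"""
    d_k = Q.shape[-1]
    scores = Q @ K.T / np.sqrt(d_k)     # 点乘后除以 sqrt(d_k),d_k=64 时即除以 8
    return softmax(scores) @ V

rng = np.random.default_rng(0)
X = rng.normal(size=(2, 512))           # 两个词的 embedding,如 "Thinking Machines"
W_q, W_k, W_v = (rng.normal(size=(512, 64)) / 512 ** 0.5 for _ in range(3))
Q, K, V = X @ W_q, X @ W_k, X @ W_v     # 512 维投影到 64 维
print(scaled_dot_product_attention(Q, K, V).shape)  # (2, 64)
```

multi-head 即把这一计算用 8 组不同的 $W_q, W_k, W_v$ 并行做 8 次,再把结果拼接。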
diff --git a/docs/tutorials/recommendation_system/evaluation_metric.md b/docs/tutorials/recommendation_system/evaluation_metric.md
old mode 100644
new mode 100755
index d86bd83e3..e54fb0a4b
--- a/docs/tutorials/recommendation_system/evaluation_metric.md
+++ b/docs/tutorials/recommendation_system/evaluation_metric.md
@@ -357,8 +357,3 @@ Interleaving 方法的优缺点
### 多样性
用户的兴趣是多样的,在做推荐的时候需要给用户提供多样的物品,可以挖掘新用户的兴趣点,拓展用户的兴趣范围,提升用户的体验
-
-
-
-
-
diff --git a/docs/tutorials/recommendation_system/index.rst b/docs/tutorials/recommendation_system/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/recommendation_system/recommender_system.md b/docs/tutorials/recommendation_system/recommender_system.md
old mode 100644
new mode 100755
index a44ef2526..270cf3bed
--- a/docs/tutorials/recommendation_system/recommender_system.md
+++ b/docs/tutorials/recommendation_system/recommender_system.md
@@ -120,5 +120,4 @@
3、衡量指标
-CTR (Click Through Rate):当给用户推荐他真实喜欢的内容时,用户就会产生比较大的点击意愿,进而产生较高的点击。
-
+CTR (Click Through Rate):当给用户推荐他真实喜欢的内容时,用户就会产生比较大的点击意愿,进而产生较高的点击。
diff --git a/docs/tutorials/reinforcement_learning/Actor-Critic.md b/docs/tutorials/reinforcement_learning/Actor-Critic.md
old mode 100644
new mode 100755
index fae38ef14..b0c8fe443
--- a/docs/tutorials/reinforcement_learning/Actor-Critic.md
+++ b/docs/tutorials/reinforcement_learning/Actor-Critic.md
@@ -58,21 +58,21 @@ $$L=\sum log\pi_{\theta}(s_{t},a_{t})(E(G_{t}-V_{s_{t}}))$$
因为$E(G_{t}|s_{t},a_{t})=Q(s_{t},a_{t})$,故进一步变成:
$$L=\sum log \pi_{\theta}(s_{t},a_{t})(Q(s_{t},a_{t})-V(s_{t}))$$
-
+
照上面的式子看来,我们需要两个网络去估计$Q(s_{t},a_{t})$和$V(s_{t})$,但是考虑到贝尔曼方程:
-
+
$$Q(s_{t},a_{t})=E(r+\gamma V(s_{t+1}))$$
-
+
弃掉期望:
-
+
$$Q(s_{t},a_{t})=r+\gamma V(s_{t+1})$$
-
+
在原始的A3C论文中试了各种方法,最后做出来就是直接把期望值拿掉最好,这是根据实验得出来的。
最终的式子为:
-
+
$$L=\sum log\pi_{\theta}(s_{t},a_{t})(r+\gamma V(s_{t+1})-V(s_{t}))$$
这样只需要一个网络就可以估算出V值了,而估算V的网络正是我们在 Q-learning 中做的,所以我们就把这个网络叫做 Critic。这样就在 Policy Gradient 算法的基础上引进了 Q-learning 算法了
-
+

## Actor-Critic算法流程
@@ -92,7 +92,7 @@ $$L=\sum log\pi_{\theta}(s_{t},a_{t})(E(G_{t}-V_{s_{t}}))$$
5. 使用均方差损失函数$\sum (R+\gamma V(S')-V(S,w))^2$作Critic网络参数w的梯度更新;
6. 更新Actor网络参数$\theta$:
$$\theta=\theta+\alpha \nabla_{\theta} log \pi_{\theta}(S_{t},A)\delta $$
-
+
对于Actor的分值函数$\nabla_{\theta} log \pi_{\theta}(S_{t},A)$,可以选择softmax或者高斯分值函数。
## Actor-Critic优缺点
@@ -111,7 +111,3 @@ $$L=\sum log\pi_{\theta}(s_{t},a_{t})(E(G_{t}-V_{s_{t}}))$$
+ DDPG算法,使用了双Actor神经网络和双Critic神经网络的方法来改善收敛性。
+ A3C算法,使用了多线程的方式,一个主线程负责更新Actor和Critic的参数,多个辅线程负责分别和环境交互,得到梯度更新值,汇总更新主线程的参数。而所有的辅线程会定期从主线程更新网络参数。这些辅线程起到了类似DQN中经验回放的作用,但是效果更好。
-
-
-
-
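
结合上面的流程,下面给出单步更新的表格型示意:Critic 用 TD 误差 $\delta = r + \gamma V(s') - V(s)$ 更新,Actor 沿 $\nabla_{\theta} \log \pi_{\theta} \cdot \delta$ 方向更新。线性 softmax 策略与具体数值均为演示假设:

```python
import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

n_states, n_actions, gamma, lr = 4, 2, 0.9, 0.1
V = np.zeros(n_states)                    # Critic:状态价值表
theta = np.zeros((n_states, n_actions))   # Actor:softmax 策略的参数

s, a, r, s_next = 0, 1, 1.0, 2            # 一条虚构的转移样本 (S, A, R, S')
delta = r + gamma * V[s_next] - V[s]      # TD 误差,作为优势的估计
V[s] += lr * delta                        # Critic 更新(均方差损失的梯度步)

pi = softmax(theta[s])
grad_log_pi = -pi
grad_log_pi[a] += 1.0                     # softmax 策略的分值函数 ∇ log π(a|s)
theta[s] += lr * delta * grad_log_pi      # Actor 更新
print(delta, V[s], theta[s])
```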
diff --git a/docs/tutorials/reinforcement_learning/DQN.md b/docs/tutorials/reinforcement_learning/DQN.md
old mode 100644
new mode 100755
index 9500b2371..931e626a7
--- a/docs/tutorials/reinforcement_learning/DQN.md
+++ b/docs/tutorials/reinforcement_learning/DQN.md
@@ -36,4 +36,3 @@
> 3. end for
> 4. 每隔固定个episode,更新Qtarget网络
>3. end for
-
diff --git a/docs/tutorials/reinforcement_learning/Q-learning.md b/docs/tutorials/reinforcement_learning/Q-learning.md
old mode 100644
new mode 100755
index d2b5f5257..ba051cc40
--- a/docs/tutorials/reinforcement_learning/Q-learning.md
+++ b/docs/tutorials/reinforcement_learning/Q-learning.md
@@ -79,5 +79,3 @@ $$ Q( (3,2), down) = 0.6 + 0.1× ( -0.4 + 0.5 × max [0.2, 0.4, 0.6] – 0.6)=0.
## 优缺点
Q-Learning算法有一些缺点,比如状态和动作都假设是离散且有限的,对于复杂的情况处理起来会很麻烦;智能体的决策只依赖当前环境的状态,所以如果状态之间存在时序关联那么学习的效果就不佳。
-
-
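
上面的数值例子可以用一行更新公式复现:

```python
# Q-Learning 更新:Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
def q_update(q_sa, reward, next_q_values, alpha=0.1, gamma=0.5):
    return q_sa + alpha * (reward + gamma * max(next_q_values) - q_sa)

# 状态 (3,2) 执行 down:Q=0.6,奖励 -0.4,下一状态各动作的 Q 值为 [0.2, 0.4, 0.6]
print(q_update(0.6, -0.4, [0.2, 0.4, 0.6]))  # ≈ 0.53,与上式一致
```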
diff --git a/docs/tutorials/reinforcement_learning/Sarsa.md b/docs/tutorials/reinforcement_learning/Sarsa.md
old mode 100644
new mode 100755
index a454eeab2..16d58c1e3
--- a/docs/tutorials/reinforcement_learning/Sarsa.md
+++ b/docs/tutorials/reinforcement_learning/Sarsa.md
@@ -77,4 +77,4 @@ SARSA 算法经常与Q-learning 算法作比较,以便探索出两种算法分
+ Q-learning具有比SARSA更高的每样本方差,并且可能因此产生收敛问题。当通过Q-learning训练神经网络时,这会成为一个问题。
+ SARSA在接近收敛时,允许对探索性的行动进行可能的惩罚,而Q-learning会直接忽略,这使得SARSA算法更加保守。如果存在接近最佳路径的大量负面报酬的风险,Q-learning将倾向于在探索时触发奖励,而SARSA将倾向于避免危险的最佳路径并且仅在探索参数减少时慢慢学会使用它。
-如果是在模拟中或在低成本和快速迭代的环境中训练代理,那么由于第一点(直接学习最优策略),Q-learning是一个不错的选择。 如果代理是在线学习,并且注重学习期间获得的奖励,那么SARSA算法更加适用。
\ No newline at end of file
+如果是在模拟中或在低成本和快速迭代的环境中训练代理,那么由于第一点(直接学习最优策略),Q-learning是一个不错的选择。 如果代理是在线学习,并且注重学习期间获得的奖励,那么SARSA算法更加适用。
diff --git a/docs/tutorials/reinforcement_learning/basic_information.md b/docs/tutorials/reinforcement_learning/basic_information.md
old mode 100644
new mode 100755
index 5fff9249b..985e12ef9
--- a/docs/tutorials/reinforcement_learning/basic_information.md
+++ b/docs/tutorials/reinforcement_learning/basic_information.md
@@ -15,6 +15,3 @@

在flappy bird游戏中,小鸟即为智能体,除小鸟以外的整个游戏环境可统称为环境,状态可以理解为在当前时间点的游戏图像。在本游戏中,智能体可以执行的动作为向上飞,或什么都不做靠重力下降。策略则指小鸟依据什么来判断是要执行向上飞的动作还是什么都不做,这个策略可能是根据值函数大小判断,也可能是依据在当前状态下执行不同动作的概率或是其他的判断方法。奖励分为奖励和惩罚两种,每当小鸟安全地飞过一个柱子都会获得一分的奖励,而如果小鸟掉到地上或者撞到柱子则会获得惩罚。
-
-
-
diff --git a/docs/tutorials/reinforcement_learning/index.rst b/docs/tutorials/reinforcement_learning/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/reinforcement_learning/markov_decision_process.md b/docs/tutorials/reinforcement_learning/markov_decision_process.md
old mode 100644
new mode 100755
index ec0b41701..52da7840f
--- a/docs/tutorials/reinforcement_learning/markov_decision_process.md
+++ b/docs/tutorials/reinforcement_learning/markov_decision_process.md
@@ -29,4 +29,4 @@ $$
* R是奖励函数:智能体在某个状态 $s$ 下,采取动作 $a$ 后,与环境交互后所获得的奖励,表示为 $R(s_{t}=s,a_{t}=a)$
* $\gamma$ 是折扣因子(discounted factor),取值区间为 $[0,1]$
-所以MDP过程可以表示为 $(S,A,P,R,\gamma)$,如果该过程中的状态转移矩阵 $P$ 和奖励 $R(s,a)$ 对智能体都是可见的,我们称这样的Agent为Model-based Agent,否则称为Model-free Agent。
\ No newline at end of file
+所以MDP过程可以表示为 $(S,A,P,R,\gamma)$,如果该过程中的状态转移矩阵 $P$ 和奖励 $R(s,a)$ 对智能体都是可见的,我们称这样的Agent为Model-based Agent,否则称为Model-free Agent。
diff --git a/docs/tutorials/reinforcement_learning/policy_gradient.md b/docs/tutorials/reinforcement_learning/policy_gradient.md
old mode 100644
new mode 100755
index 05367b402..058c523b5
--- a/docs/tutorials/reinforcement_learning/policy_gradient.md
+++ b/docs/tutorials/reinforcement_learning/policy_gradient.md
@@ -99,7 +99,7 @@ $$
> 输入:马尔可夫决策过程$MDP=(S, A, P, R, \gamma)$,即状态、动作、状态转移概率、奖励和折扣因子,$\gamma = 1$,暂不讨论。
>输出:策略 $\pi(a|s, \theta)$,即在状态为s,参数为$\theta$的条件下,选择动作a的概率。
> 算法的具体流程:
->
+>
> 1. 随机初始化;
> 2. repeat
> 3. 根据策略$\pi_\theta$采样一个片段(episode,即智能体由初始状态不断通过动作与环境交互,直至终止状态的过程),获得$s_0, a_0, R_1, s_1, ..., s_{T-1}, a_{T-1}, R_T$;
@@ -107,4 +107,3 @@ $$
> 1. $G \leftarrow \sum_{k=1}^{T-t} \gamma^{k-1} R_{t+k}$,G是对回报的计算,回报是奖励随时间步的积累,在本实验中,$\gamma = 1$。
> 2. $\theta = \theta + \eta\gamma^{'} G\nabla_\theta\ln\pi_\theta(s_t, a_t)$,其中$\eta$是学习率。策略梯度算法采用神经网络来拟合策略梯度函数,计算策略梯度用于优化策略网络。
> 4. 直到$\theta$收敛
-
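
算法中第 1 步的回报 $G$ 从后往前递推计算最方便:$G_t = R_{t+1} + \gamma G_{t+1}$,与上面的求和式等价。奖励序列为演示假设:

```python
def returns(rewards, gamma=1.0):
    """rewards = [R_1, ..., R_T],返回每个时间步的回报 G_t。"""
    G, out = 0.0, []
    for r in reversed(rewards):   # 从最后一步向前累积
        G = r + gamma * G
        out.append(G)
    return out[::-1]

print(returns([1.0, 0.0, 2.0], gamma=0.9))  # [2.62, 1.8, 2.0]
```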
diff --git a/docs/tutorials/sequence_model/gru.md b/docs/tutorials/sequence_model/gru.md
old mode 100644
new mode 100755
index 4856c8639..0cec1da11
--- a/docs/tutorials/sequence_model/gru.md
+++ b/docs/tutorials/sequence_model/gru.md
@@ -75,4 +75,3 @@ $z_t$ 为更新门的激活结果,它同样以门控的形式控制了信息
Chung, J., Gulcehre, C., Cho, K., & Bengio, Y. (2014). Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555.[链接](https://arxiv.org/pdf/1412.3555.pdf)
[经典必读:门控循环单元(GRU)的基本概念与原理](https://www.jiqizhixin.com/articles/2017-12-24)
-
diff --git a/docs/tutorials/sequence_model/index.rst b/docs/tutorials/sequence_model/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/sequence_model/lstm.md b/docs/tutorials/sequence_model/lstm.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/sequence_model/rnn.md b/docs/tutorials/sequence_model/rnn.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/sequence_model/word_representation/emb_applications.md b/docs/tutorials/sequence_model/word_representation/emb_applications.md
old mode 100644
new mode 100755
index b188ca570..39da11d60
--- a/docs/tutorials/sequence_model/word_representation/emb_applications.md
+++ b/docs/tutorials/sequence_model/word_representation/emb_applications.md
@@ -23,4 +23,4 @@
这是比较有趣的一个应用,通过词语义上的一些关系来进行推理一些词,例如下面几个例子。
* King - Man + Woman = Queen
-* China - Beijing + Washington = America
+* China - Beijing + Washington = America
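
这类词语推理可以用「向量加减 + 余弦相似度检索」实现。下面的三维玩具向量仅为演示假设,真实场景应加载训练好的词向量:

```python
import numpy as np

def analogy(a, b, c, emb):
    """返回与 vec(b) - vec(a) + vec(c) 余弦相似度最高的词,即 a:b ≈ c:?"""
    target = emb[b] - emb[a] + emb[c]
    best, best_sim = None, -1.0
    for w, v in emb.items():
        if w in (a, b, c):
            continue              # 排除输入词本身
        sim = v @ target / (np.linalg.norm(v) * np.linalg.norm(target))
        if sim > best_sim:
            best, best_sim = w, sim
    return best

emb = {
    "King":  np.array([0.9, 0.8, 0.1]), "Queen": np.array([0.9, 0.1, 0.8]),
    "Man":   np.array([0.1, 0.9, 0.1]), "Woman": np.array([0.1, 0.1, 0.9]),
}
print(analogy("Man", "King", "Woman"))  # Queen
```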
diff --git a/docs/tutorials/sequence_model/word_representation/index.rst b/docs/tutorials/sequence_model/word_representation/index.rst
old mode 100644
new mode 100755
diff --git a/docs/tutorials/sequence_model/word_representation/one-hot.md b/docs/tutorials/sequence_model/word_representation/one-hot.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/sequence_model/word_representation/word2vec.md b/docs/tutorials/sequence_model/word_representation/word2vec.md
old mode 100644
new mode 100755
diff --git a/docs/tutorials/sequence_model/word_representation/word_embedding.md b/docs/tutorials/sequence_model/word_representation/word_embedding.md
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/README.md b/examples/DDPG for Stock Trading/README.md
old mode 100644
new mode 100755
index 7de54429e..9351a3bf8
--- a/examples/DDPG for Stock Trading/README.md
+++ b/examples/DDPG for Stock Trading/README.md
@@ -342,12 +342,12 @@ class StockTradingEnv(gym.Env):
self.observation_space = spaces.Box(
low=0, high=1, shape=(19,), dtype=np.float32)
-
+
def seed(self, seed):
random.seed(seed)
np.random.seed(seed)
-
+
# 处理状态
def _next_observation(self):
# 有些股票数据缺失一些数据,处理一下
@@ -507,7 +507,7 @@ class ReplayBuffer(object):
self.device = paddle.get_device()
-
+
# 存入数据
def add(self, state, action, next_state, reward, done):
self.states[self.cur] = state
@@ -520,7 +520,7 @@ class ReplayBuffer(object):
self.cur = (self.cur + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
-
+
# 采样
def sample(self, batch):
ids = np.random.randint(0, self.size, size=batch)
@@ -594,7 +594,7 @@ class Actor(nn.Layer):
class Critic(nn.Layer):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
-
+
self.l1 = nn.Linear(state_dim + action_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, 1)
@@ -605,7 +605,7 @@ class Critic(nn.Layer):
return self.l3(q)
-# DDPG算法模型
+# DDPG算法模型
class DDPGModel(object):
def __init__(self, state_dim, action_dim, max_action, gamma = 0.99, tau = 0.001):
# 动作网络与目标动作网络
@@ -627,7 +627,7 @@ class DDPGModel(object):
state = paddle.to_tensor(state.reshape(1, -1), dtype='float32', place=device)
return self.actor(state).numpy().flatten()
-
+
# 训练函数
def train(self, replay_buffer, batch=64):
# 从缓存容器中采样
@@ -713,7 +713,7 @@ def eval_policy(policy, df, seed, eval_episodes=10):
for _ in range(eval_episodes):
# 初始化环境
state, done = eval_env.reset(), False
-
+
# 与环境交互
while not done:
action = policy.select_action(state)
@@ -721,7 +721,7 @@ def eval_policy(policy, df, seed, eval_episodes=10):
action[0] *= 3
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
-
+
# 计算平均奖励
avg_reward /= eval_episodes
@@ -793,7 +793,7 @@ if __name__ == '__main__':
if args.load_model != "":
policy_file = file_name if args.load_model == "default" else args.load_model
policy.load(f'./models/{policy_file}')
-
+
# 设置缓存容器
replay_buffer = ReplayBuffer.ReplayBuffer(state_dim, action_dim)
```
@@ -897,7 +897,7 @@ def eval_policy(policy, df, seed, eval_episodes=10):
for _ in range(eval_episodes):
# 初始化环境
state, done = eval_env.reset(), False
-
+
# 与环境交互
while not done:
action = policy.select_action(state)
@@ -905,7 +905,7 @@ def eval_policy(policy, df, seed, eval_episodes=10):
action[0] *= 3
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
-
+
# 计算平均奖励
avg_reward /= eval_episodes
@@ -988,4 +988,4 @@ python test.py
## 5.项目总结
-本项目为大家详细介绍了如何建模股票环境环境,并用强化学习算法求解。其中也详细解释强化学习的基础要素(如:环境、状态、动作、奖励等)在本项目中的对应关系。同时,也为大家清晰的讲解了DDPG算法的由来、解决了什么痛点、创新点是什么,以及最主要的,DDPG算法的具体流程与案例应用。
\ No newline at end of file
+本项目为大家详细介绍了如何建模股票交易环境,并用强化学习算法求解。其中也详细解释了强化学习的基础要素(如:环境、状态、动作、奖励等)在本项目中的对应关系。同时,也为大家清晰地讲解了DDPG算法的由来、解决了什么痛点、创新点是什么,以及最主要的,DDPG算法的具体流程与案例应用。
diff --git a/examples/DDPG for Stock Trading/ReplayBuffer.py b/examples/DDPG for Stock Trading/ReplayBuffer.py
old mode 100644
new mode 100755
index c0c0578b6..449af6cb6
--- a/examples/DDPG for Stock Trading/ReplayBuffer.py
+++ b/examples/DDPG for Stock Trading/ReplayBuffer.py
@@ -17,7 +17,6 @@ def __init__(self, state_dim, action_dim, max_size=int(1e4)):
self.device = paddle.get_device()
-
# 存入数据
def add(self, state, action, next_state, reward, done):
self.states[self.cur] = state
@@ -30,18 +29,22 @@ def add(self, state, action, next_state, reward, done):
self.cur = (self.cur + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
-
# 采样
def sample(self, batch):
ids = np.random.randint(0, self.size, size=batch)
# 返回paddle张量
- return (
- paddle.to_tensor(self.states[ids], dtype='float32', place=self.device),
- paddle.to_tensor(self.actions[ids], dtype='float32', place=self.device),
- paddle.to_tensor(self.next_states[ids], dtype='float32', place=self.device),
- paddle.to_tensor(self.rewards[ids], dtype='float32', place=self.device),
- paddle.to_tensor(self.dones[ids], dtype='float32', place=self.device)
- )
-
-
\ No newline at end of file
+ return (paddle.to_tensor(
+ self.states[ids], dtype='float32', place=self.device),
+ paddle.to_tensor(
+ self.actions[ids], dtype='float32',
+ place=self.device), paddle.to_tensor(
+ self.next_states[ids],
+ dtype='float32',
+ place=self.device), paddle.to_tensor(
+ self.rewards[ids],
+ dtype='float32',
+ place=self.device), paddle.to_tensor(
+ self.dones[ids],
+ dtype='float32',
+ place=self.device))
diff --git a/examples/DDPG for Stock Trading/StockEnv.py b/examples/DDPG for Stock Trading/StockEnv.py
old mode 100644
new mode 100755
index 3b0aedc37..7f3b41cb9
--- a/examples/DDPG for Stock Trading/StockEnv.py
+++ b/examples/DDPG for Stock Trading/StockEnv.py
@@ -6,17 +6,17 @@
from gym import spaces
# 默认的一些数据,用于归一化属性值
-MAX_ACCOUNT_BALANCE = 214748 # 组大的账户财产
-MAX_NUM_SHARES = 214748 # 最大的手数
-MAX_SHARE_PRICE = 5000 # 最大的单手价格
-MAX_VOLUME = 1000e6 # 最大的成交量
-MAX_AMOUNT = 3e5 # 最大的成交额
-MAX_OPEN_POSITIONS = 5 # 最大的持仓头寸
-MAX_STEPS = 500 # 最大的交互次数
-MAX_DAY_CHANGE = 1 # 最大的日期改变
-max_loss =-50000 # 最大的损失
-max_predict_rate = 4 # 最大的预测率
-INITIAL_ACCOUNT_BALANCE = 10000 # 初始的金钱
+MAX_ACCOUNT_BALANCE = 214748  # 最大的账户财产
+MAX_NUM_SHARES = 214748 # 最大的手数
+MAX_SHARE_PRICE = 5000 # 最大的单手价格
+MAX_VOLUME = 1000e6 # 最大的成交量
+MAX_AMOUNT = 3e5 # 最大的成交额
+MAX_OPEN_POSITIONS = 5 # 最大的持仓头寸
+MAX_STEPS = 500 # 最大的交互次数
+MAX_DAY_CHANGE = 1 # 最大的日期改变
+max_loss = -50000 # 最大的损失
+max_predict_rate = 4 # 最大的预测率
+INITIAL_ACCOUNT_BALANCE = 10000 # 初始的金钱
class StockTradingEnv(gym.Env):
@@ -30,26 +30,27 @@ def __init__(self, df):
self.reward_range = (0, MAX_ACCOUNT_BALANCE)
# 动作的可能情况:买入x%, 卖出x%, 观望
- self.action_space = spaces.Box(
- low=np.array([-3, 0]), high=np.array([3, 1]), dtype=np.float32)
+ self.action_space = spaces.Box(low=np.array([-3, 0]),
+ high=np.array([3, 1]),
+ dtype=np.float32)
# 环境状态的维度
- self.observation_space = spaces.Box(
- low=0, high=1, shape=(19,), dtype=np.float32)
+ self.observation_space = spaces.Box(low=0,
+ high=1,
+ shape=(19, ),
+ dtype=np.float32)
-
def seed(self, seed):
random.seed(seed)
np.random.seed(seed)
-
# 处理状态
def _next_observation(self):
# 有些股票数据缺失一些数据,处理一下
d10 = self.df.loc[self.current_step, 'peTTM'] / 1e4
d11 = self.df.loc[self.current_step, 'pbMRQ'] / 100
d12 = self.df.loc[self.current_step, 'psTTM'] / 100
- if np.isnan(d10): # 某些数据是0.00000000e+00,如果是nan会报错
+ if np.isnan(d10): # 某些数据是0.00000000e+00,如果是nan会报错
d10 = d11 = d12 = 0.00000000e+00
obs = np.array([
self.df.loc[self.current_step, 'open'] / MAX_SHARE_PRICE,
@@ -74,23 +75,22 @@ def _next_observation(self):
])
return obs
-
# 执行当前动作,并计算出当前的数据(如:资产等)
def _take_action(self, action):
# 随机设置当前的价格,其范围上界为当前时间点的价格
- current_price = random.uniform(
- self.df.loc[self.current_step, "open"], self.df.loc[self.current_step, "close"])
+ current_price = random.uniform(self.df.loc[self.current_step, "open"],
+ self.df.loc[self.current_step, "close"])
action_type = action[0]
amount = action[1]
- if action_type > 1: # 买入amount%
+ if action_type > 1: # 买入amount%
total_possible = int(self.balance / current_price)
shares_bought = int(total_possible * amount)
prev_cost = self.cost_basis * self.shares_held
additional_cost = shares_bought * current_price
self.balance -= additional_cost
- self.cost_basis = (
- prev_cost + additional_cost) / (self.shares_held + shares_bought)
+ self.cost_basis = (prev_cost + additional_cost) / (
+ self.shares_held + shares_bought)
self.shares_held += shares_bought
elif action_type < -1: # 卖出amount%
@@ -109,7 +109,6 @@ def _take_action(self, action):
if self.shares_held == 0:
self.cost_basis = 0
-
# 与环境交互
def step(self, action):
# 在环境内执行动作
@@ -130,19 +129,18 @@ def step(self, action):
# 计算相对收益比,并据此来计算奖励
profit = self.net_worth - INITIAL_ACCOUNT_BALANCE
profit_percent = profit / INITIAL_ACCOUNT_BALANCE
- if profit_percent>=0:
- reward = max(1,profit_percent/0.001)
+ if profit_percent >= 0:
+ reward = max(1, profit_percent / 0.001)
else:
reward = -100
- if self.net_worth <= 0 :
+ if self.net_worth <= 0:
done = True
obs = self._next_observation()
return obs, reward, done, {}
-
# 重置环境
def reset(self, new_df=None):
# 重置环境的变量为初始值
@@ -164,16 +162,20 @@ def reset(self, new_df=None):
return self._next_observation()
-
# 显示环境至屏幕
def render(self, mode='human'):
# 打印环境信息
profit = self.net_worth - INITIAL_ACCOUNT_BALANCE
- print('-'*30)
+ print('-' * 30)
print(f'Step: {self.current_step}')
print(f'Balance: {self.balance}')
- print(f'Shares held: {self.shares_held} (Total sold: {self.total_shares_sold})')
- print(f'Avg cost for held shares: {self.cost_basis} (Total sales value: {self.total_sales_value})')
- print(f'Net worth: {self.net_worth} (Max net worth: {self.max_net_worth})')
+ print(
+ f'Shares held: {self.shares_held} (Total sold: {self.total_shares_sold})'
+ )
+ print(
+ f'Avg cost for held shares: {self.cost_basis} (Total sales value: {self.total_sales_value})'
+ )
+ print(
+ f'Net worth: {self.net_worth} (Max net worth: {self.max_net_worth})')
print(f'Profit: {profit}')
- return profit
\ No newline at end of file
+ return profit
diff --git a/examples/DDPG for Stock Trading/images/Actor-network-and-critic-network-in-DDPG.png b/examples/DDPG for Stock Trading/images/Actor-network-and-critic-network-in-DDPG.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210410171729586.png b/examples/DDPG for Stock Trading/images/image-20210410171729586.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210410174646116.png b/examples/DDPG for Stock Trading/images/image-20210410174646116.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210410174849192.png b/examples/DDPG for Stock Trading/images/image-20210410174849192.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210807032130354.png b/examples/DDPG for Stock Trading/images/image-20210807032130354.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210807053203115.png b/examples/DDPG for Stock Trading/images/image-20210807053203115.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210807053359716.png b/examples/DDPG for Stock Trading/images/image-20210807053359716.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210807053722193.png b/examples/DDPG for Stock Trading/images/image-20210807053722193.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210807055813601.png b/examples/DDPG for Stock Trading/images/image-20210807055813601.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210807060104619.png b/examples/DDPG for Stock Trading/images/image-20210807060104619.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210807061418761.png b/examples/DDPG for Stock Trading/images/image-20210807061418761.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210807061611187.png b/examples/DDPG for Stock Trading/images/image-20210807061611187.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210807065202651.png b/examples/DDPG for Stock Trading/images/image-20210807065202651.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/images/image-20210815224035466.png b/examples/DDPG for Stock Trading/images/image-20210815224035466.png
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/model.py b/examples/DDPG for Stock Trading/model.py
old mode 100644
new mode 100755
index ce49f0768..35563f931
--- a/examples/DDPG for Stock Trading/model.py
+++ b/examples/DDPG for Stock Trading/model.py
@@ -19,7 +19,6 @@ def __init__(self, state_dim, action_dim, max_action):
self.max_action = max_action
-
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
@@ -31,7 +30,7 @@ def forward(self, state):
class Critic(nn.Layer):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
-
+
self.l1 = nn.Linear(state_dim + action_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, 1)
@@ -44,27 +43,29 @@ def forward(self, state, action):
# DDPG算法模型
class DDPGModel(object):
- def __init__(self, state_dim, action_dim, max_action, gamma = 0.99, tau = 0.001):
+ def __init__(self, state_dim, action_dim, max_action, gamma=0.99,
+ tau=0.001):
# 动作网络与目标动作网络
self.actor = Actor(state_dim, action_dim, max_action)
self.actor_target = copy.deepcopy(self.actor)
- self.actor_optimizer = optim.Adam(parameters=self.actor.parameters(), learning_rate=1e-4)
+ self.actor_optimizer = optim.Adam(
+ parameters=self.actor.parameters(), learning_rate=1e-4)
# 值函数网络与目标值函数网络
self.critic = Critic(state_dim, action_dim)
self.critic_target = copy.deepcopy(self.critic)
- self.critic_optimizer = optim.Adam(parameters=self.critic.parameters(), weight_decay=1e-2)
+ self.critic_optimizer = optim.Adam(
+ parameters=self.critic.parameters(), weight_decay=1e-2)
self.gamma = gamma
self.tau = tau
-
# 根据当前状态,选择动作:过一个动作网络得到动作
def select_action(self, state):
- state = paddle.to_tensor(state.reshape(1, -1), dtype='float32', place=device)
+ state = paddle.to_tensor(
+ state.reshape(1, -1), dtype='float32', place=device)
return self.actor(state).numpy().flatten()
-
    # Training function
def train(self, replay_buffer, batch=64):
        # Sample a batch from the replay buffer
@@ -72,7 +73,7 @@ def train(self, replay_buffer, batch=64):
        # Compute the target network's Q-value
q_target = self.critic_target(next_state, self.actor_target(next_state))
- q_target = reward + ((1- done) * self.gamma * q_target).detach()
+ q_target = reward + ((1 - done) * self.gamma * q_target).detach()
        # Compute the current network's Q-value
q_eval = self.critic(state, action)
@@ -96,29 +97,35 @@ def train(self, replay_buffer, batch=64):
self.actor_optimizer.step()
        # Soft-update the target network parameters
- for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
- target_param.set_value(target_param * (1.0 - self.tau) + param * self.tau)
- for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
- target_param.set_value(target_param * (1.0 - self.tau) + param * self.tau)
-
+ for param, target_param in zip(self.critic.parameters(),
+ self.critic_target.parameters()):
+ target_param.set_value(target_param * (1.0 - self.tau) + param *
+ self.tau)
+ for param, target_param in zip(self.actor.parameters(),
+ self.actor_target.parameters()):
+ target_param.set_value(target_param * (1.0 - self.tau) + param *
+ self.tau)
+
+    # Save model parameters
- # 保存模型参数
def save(self, filename):
paddle.save(self.critic.state_dict(), filename + '_critic')
- paddle.save(self.critic_optimizer.state_dict(), filename + '_critic_optimizer')
+ paddle.save(self.critic_optimizer.state_dict(),
+ filename + '_critic_optimizer')
paddle.save(self.actor.state_dict(), filename + '_actor')
- paddle.save(self.actor_optimizer.state_dict(), filename + '_actor_optimizer')
-
+ paddle.save(self.actor_optimizer.state_dict(),
+ filename + '_actor_optimizer')
+
+    # Load model parameters
- # 导入模型参数
def load(self, filename):
self.critic.set_state_dict(paddle.load(filename + '_critic'))
- self.critic_optimizer.set_state_dict(paddle.load(filename + '_critic_optimizer'))
+ self.critic_optimizer.set_state_dict(
+ paddle.load(filename + '_critic_optimizer'))
self.critic_target = copy.deepcopy(self.critic)
self.actor.set_state_dict(paddle.load(filename + '_actor'))
- self.actor_optimizer.set_state_dict(paddle.load(filename + '_actor_optimizer'))
+ self.actor_optimizer.set_state_dict(
+ paddle.load(filename + '_actor_optimizer'))
self.actor_target = copy.deepcopy(self.actor)
-
-
\ No newline at end of file
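The target networks above are never copied wholesale; each training step they are nudged toward the online networks with the soft (Polyak) rule theta_target <- (1 - tau) * theta_target + tau * theta. A minimal self-contained sketch of that update, reusing the same `set_value` pattern as `DDPGModel.train` (the `nn.Linear` toy network is a stand-in, not the project's actor or critic):

```python
# Minimal sketch of the soft (Polyak) target update used in model.py:
# each target parameter moves a small step tau toward its online
# counterpart instead of being replaced outright.
import copy

import paddle.nn as nn

def soft_update(net, target, tau=0.001):
    # Parameters are paired by position, exactly as in DDPGModel.train.
    for param, target_param in zip(net.parameters(), target.parameters()):
        target_param.set_value(target_param * (1.0 - tau) + param * tau)

net = nn.Linear(4, 2)        # stand-in for the actor/critic
target = copy.deepcopy(net)  # target networks start as exact copies
soft_update(net, target, tau=0.001)
```

With tau = 0.001 the target lags the online network by roughly a thousand steps, which stabilizes the bootstrapped Q-target.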
diff --git a/examples/DDPG for Stock Trading/models/home/aistudio/models/DDPG_Stock_123_actor b/examples/DDPG for Stock Trading/models/home/aistudio/models/DDPG_Stock_123_actor
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/models/home/aistudio/models/DDPG_Stock_123_actor_optimizer b/examples/DDPG for Stock Trading/models/home/aistudio/models/DDPG_Stock_123_actor_optimizer
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/models/home/aistudio/models/DDPG_Stock_123_critic b/examples/DDPG for Stock Trading/models/home/aistudio/models/DDPG_Stock_123_critic
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/models/home/aistudio/models/DDPG_Stock_123_critic_optimizer b/examples/DDPG for Stock Trading/models/home/aistudio/models/DDPG_Stock_123_critic_optimizer
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/results/home/aistudio/results/DDPG_Stock_123.npy b/examples/DDPG for Stock Trading/results/home/aistudio/results/DDPG_Stock_123.npy
old mode 100644
new mode 100755
diff --git a/examples/DDPG for Stock Trading/test.py b/examples/DDPG for Stock Trading/test.py
old mode 100644
new mode 100755
index 31aa615ac..f347a3600
--- a/examples/DDPG for Stock Trading/test.py
+++ b/examples/DDPG for Stock Trading/test.py
@@ -4,13 +4,11 @@
import os
from visualdl import LogWriter
-
import model
import ReplayBuffer
import StockEnv
import pandas as pd
-
# Load the data
df = pd.read_csv('data/data102715/test.csv')
# df = df.sort_values('date')
@@ -19,6 +17,7 @@
# Random seeds used by the evaluation environments
eval_seed = [53, 47, 99, 107, 1, 17, 57, 97, 179, 777]
+
# Function that evaluates the model
def eval_policy(policy, df, seed, eval_episodes=10):
avg_reward = 0.
@@ -26,7 +25,7 @@ def eval_policy(policy, df, seed, eval_episodes=10):
        # Create the evaluation environment and set its random seed
eval_env = StockEnv.StockTradingEnv(df)
eval_env.seed(seed + eval_seed[epi])
-
+
        # Reset the evaluation environment
state, done = eval_env.reset(), False
t = 0
@@ -35,16 +34,16 @@ def eval_policy(policy, df, seed, eval_episodes=10):
        # Let the model interact with the environment
while not done:
action = policy.select_action(state)
- action[0] *=3
+ action[0] *= 3
state, reward, done, _ = eval_env.step(action)
writer.add_scalar(tag='reward', step=t, value=reward)
t += 1
epi_reward += reward
avg_reward += reward
-
+
        # Visualize the reward of the whole episode
writer.add_scalar(tag='episode_reward', step=epi, value=epi_reward)
-
+
    # Compute the average reward
avg_reward /= eval_episodes
@@ -54,6 +53,7 @@ def eval_policy(policy, df, seed, eval_episodes=10):
return avg_reward
+
# Default hyperparameters
default_seed = 123
@@ -90,5 +90,3 @@ def eval_policy(policy, df, seed, eval_episodes=10):
# Run the evaluation
evaluations = [eval_policy(policy, df, args.seed)]
-
-
\ No newline at end of file
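`eval_policy` above pins one fixed seed per episode, so every call scores the policy on the same market trajectories and the averages stay comparable across runs. A hedged sketch of that pattern with generic stand-ins (`policy` and `env_fn` are placeholders, not the project's objects):

```python
# Sketch of seeded evaluation: fixed per-episode seeds make repeated
# evaluations comparable, since each run replays the same episodes.
def evaluate(policy, env_fn, seeds):
    avg_reward = 0.0
    for seed in seeds:
        env = env_fn()
        env.seed(seed)                     # same episode on every run
        state, done = env.reset(), False
        while not done:
            action = policy.select_action(state)
            state, reward, done, _ = env.step(action)
            avg_reward += reward
    return avg_reward / len(seeds)         # mean reward per episode
```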
diff --git a/examples/DDPG for Stock Trading/train.py b/examples/DDPG for Stock Trading/train.py
old mode 100644
new mode 100755
index ae27cd735..b99f38a13
--- a/examples/DDPG for Stock Trading/train.py
+++ b/examples/DDPG for Stock Trading/train.py
@@ -4,15 +4,14 @@
import os
from visualdl import LogWriter
-
import model
import ReplayBuffer
import StockEnv
import pandas as pd
-
# Load the data
df = pd.read_csv('data/data102715/train.csv')
+
# df = df.sort_values('date')
@@ -26,7 +25,7 @@ def eval_policy(policy, df, seed, eval_episodes=10):
for _ in range(eval_episodes):
        # Reset the environment
state, done = eval_env.reset(), False
-
+
        # Interact with the environment
while not done:
action = policy.select_action(state)
@@ -34,7 +33,7 @@ def eval_policy(policy, df, seed, eval_episodes=10):
action[0] *= 3
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
-
+
    # Compute the average reward
avg_reward /= eval_episodes
@@ -46,13 +45,13 @@ def eval_policy(policy, df, seed, eval_episodes=10):
# Default hyperparameters
-default_seed = 123 # 随机种子
-default_batch = 64 # 批量大小
-default_gamma = 0.95 # 折扣因子
-default_tau = 0.005 # 当前网络参数比例,用于更新目标网络
-default_timesteps = 2e5 # 训练步数
-default_expl_noise = 0.1 # 高斯噪声
-default_eval_freq = 6e3 # 评估模型的频率
+default_seed = 123  # Random seed
+default_batch = 64  # Batch size
+default_gamma = 0.95  # Discount factor
+default_tau = 0.005  # Weight of the online network when soft-updating the target network
+default_timesteps = 2e5  # Number of training steps
+default_expl_noise = 0.1  # Std of the Gaussian exploration noise
+default_eval_freq = 6e3  # How often (in steps) the model is evaluated
# Command-line argument parser
parser = argparse.ArgumentParser()
@@ -73,7 +72,7 @@ def eval_policy(policy, df, seed, eval_episodes=10):
if __name__ == '__main__':
    # Path setup
if not os.path.exists("./results"):
- os.makedirs('./results')
+ os.makedirs('./results')
if args.save_model and not os.path.exists("./models"):
os.makedirs('./models')
@@ -106,12 +105,12 @@ def eval_policy(policy, df, seed, eval_episodes=10):
if args.load_model != "":
policy_file = file_name if args.load_model == "default" else args.load_model
policy.load(f'./models/{policy_file}')
-
+
    # Set up the replay buffer
replay_buffer = ReplayBuffer.ReplayBuffer(state_dim, action_dim)
    # Evaluate the untrained policy as a baseline
- evaluations = [eval_policy(policy, df, args.seed)]
+ evaluations = [eval_policy(policy, df, args.seed)]
    # Reset the environment
state, done = env.reset(), False
@@ -125,10 +124,9 @@ def eval_policy(policy, df, seed, eval_episodes=10):
episode_timesteps += 1
        # Choose an action based on the current state
- action = (
- policy.select_action(np.array(state))
- + np.random.normal(0, max_action * args.expl_noise, size=action_dim)
- ).clip(-max_action, max_action)
+ action = (policy.select_action(np.array(state)) + np.random.normal(
+ 0, max_action * args.expl_noise, size=action_dim)).clip(-max_action,
+ max_action)
action[0] *= 3
print('action', action)
@@ -150,9 +148,12 @@ def eval_policy(policy, df, seed, eval_episodes=10):
        # The episode has ended
if done:
            # Print progress and reset the state
- print(f'Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}')
+ print(
+ f'Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}'
+ )
# Reset environment
- writer.add_scalar(tag='episode_reward', step=episode_num, value= episode_reward)
+ writer.add_scalar(
+ tag='episode_reward', step=episode_num, value=episode_reward)
state, done = env.reset(), False
episode_reward = 0
episode_timesteps = 0
@@ -162,6 +163,4 @@ def eval_policy(policy, df, seed, eval_episodes=10):
if (t + 1) % args.eval_freq == 0:
evaluations.append(eval_policy(policy, df, args.seed))
np.save(f"./results/{file_name}", evaluations)
- if args.save_model: policy.save(f"./models/{file_name}")
-
-
\ No newline at end of file
+ if args.save_model: policy.save(f"./models/{file_name}")
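During training the deterministic actor output is perturbed with Gaussian noise and clipped back into the valid action range, while evaluation uses the noise-free action. A small NumPy sketch of just that rule (the shapes and constants are illustrative, not the script's runtime values):

```python
# Exploration rule from train.py in isolation: add zero-mean Gaussian
# noise scaled by max_action, then clip to the valid action interval.
import numpy as np

max_action, expl_noise, action_dim = 1.0, 0.1, 3
det_action = np.zeros(action_dim)  # stand-in for policy.select_action(state)
noise = np.random.normal(0, max_action * expl_noise, size=action_dim)
action = (det_action + noise).clip(-max_action, max_action)
```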
diff --git a/examples/DeepFM for CTR Prediction/Dataset.py b/examples/DeepFM for CTR Prediction/Dataset.py
old mode 100644
new mode 100755
index e54fd14ff..43e875ada
--- a/examples/DeepFM for CTR Prediction/Dataset.py
+++ b/examples/DeepFM for CTR Prediction/Dataset.py
@@ -1,31 +1,41 @@
import os
import numpy as np
+
+
class DeepFM_Dataset():
    # Read the data from the dataset
- def __init__(self,batchsize,dataFileDir,sparse_num_field=26,dense_feature_dim=13):
- self.batchsize=batchsize
- self.dataFileDir=dataFileDir
- self.dataFiles=os.listdir(dataFileDir)
- self.next_batch_reader=self.nextBatch()
+ def __init__(self,
+ batchsize,
+ dataFileDir,
+ sparse_num_field=26,
+ dense_feature_dim=13):
+ self.batchsize = batchsize
+ self.dataFileDir = dataFileDir
+ self.dataFiles = os.listdir(dataFileDir)
+ self.next_batch_reader = self.nextBatch()
+
    # Fetch the next batch of data
- def nextBatch(self,sparse_num_field=26,dense_feature_dim=13):
- batchData=[]
- cnt=0
+ def nextBatch(self, sparse_num_field=26, dense_feature_dim=13):
+ batchData = []
+ cnt = 0
        # Read line by line from all the files
for dataFile in self.dataFiles:
- with open(self.dataFileDir.strip('/')+'/'+dataFile,'r') as lines:
+ with open(self.dataFileDir.strip('/') + '/' + dataFile,
+ 'r') as lines:
for line in lines:
batchData.append(predata(line))
- cnt+=1
- if cnt % self.batchsize==0:
- batchDataArray=np.array(batchData)
+ cnt += 1
+ if cnt % self.batchsize == 0:
+ batchDataArray = np.array(batchData)
                    # Extract the label, the sparse features and the dense features
label,sparse_feature,dense_feature=\
batchDataArray[:,0].astype(np.float32),\
batchDataArray[:,1:sparse_num_field+1].astype(np.int),\
batchDataArray[:,-dense_feature_dim:].astype(np.float32)
- yield label,sparse_feature,dense_feature
- batchData=[]
+ yield label, sparse_feature, dense_feature
+ batchData = []
def getNextBatchData(self):
- return next(self.next_batch_reader)# [[label,sparse fea,dense fea],...] shape:[batchsize,1+26+13]
\ No newline at end of file
+ return next(
+ self.next_batch_reader
+ ) # [[label,sparse fea,dense fea],...] shape:[batchsize,1+26+13]
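`DeepFM_Dataset` wraps a plain Python generator: `nextBatch` yields one batch at a time and `getNextBatchData` is simply `next()` on it. A stripped-down sketch of the same pattern on synthetic data (illustrative only, not the project's reader):

```python
# Generator-based batching as in Dataset.py: accumulate items, yield a
# full batch, reset; a trailing partial batch is silently dropped.
import numpy as np

def batch_reader(lines, batchsize):
    batch = []
    for line in lines:
        batch.append(line)
        if len(batch) == batchsize:
            yield np.array(batch)
            batch = []

reader = batch_reader(range(10), batchsize=4)
print(next(reader))  # [0 1 2 3]
print(next(reader))  # [4 5 6 7]; the leftover [8, 9] is never yielded
```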
diff --git a/examples/DeepFM for CTR Prediction/DeepFMLayer.py b/examples/DeepFM for CTR Prediction/DeepFMLayer.py
old mode 100644
new mode 100755
index 1808f5265..724a639fb
--- a/examples/DeepFM for CTR Prediction/DeepFMLayer.py
+++ b/examples/DeepFM for CTR Prediction/DeepFMLayer.py
@@ -52,31 +52,18 @@ def __init__(self,
        # First-order sparse features
self.sparse_feature_oneOrderWeight = paddle.nn.Embedding(
- sparse_feature_number,
- 1,
- padding_idx=0,
- sparse=True
- )
+ sparse_feature_number, 1, padding_idx=0, sparse=True)
        ## First-order dense features
self.dense_feature_oneOrderWeight = paddle.create_parameter(
- [dense_feature_dim],
- "float32"
- )
+ [dense_feature_dim], "float32")
        # Second-order features
self.sparse_latent_vecs = paddle.nn.Embedding(
- sparse_feature_number,
- embedding_dim,
- padding_idx=0,
- sparse=True
- )
+ sparse_feature_number, embedding_dim, padding_idx=0, sparse=True)
self.dense_latent_vecs = paddle.create_parameter(
- [1, dense_feature_dim, embedding_dim],
- "float32"
- )
+ [1, dense_feature_dim, embedding_dim], "float32")
def forward(self, sparse_feature, dense_feature):
        # First-order features
-
'''
        Compute the first-order term: y_1order = 0 + w*x
input [batchsize,field_num]
@@ -84,13 +71,18 @@ def forward(self, sparse_feature, dense_feature):
sum out axis=1:[batchsize,embedDim]
'''
        # Sparse features: obtain w*x <- w*1 <- w via an embedding-table lookup
- sparse_wx = self.sparse_feature_oneOrderWeight(sparse_feature) # [batchsize,sparse_field_num,1]
+ sparse_wx = self.sparse_feature_oneOrderWeight(
+ sparse_feature) # [batchsize,sparse_field_num,1]
        # Dense features: element-wise product w*x
- dense_wx = paddle.multiply(dense_feature, self.dense_feature_oneOrderWeight) # [batchsize,dense_feature_dim]
- dense_wx = paddle.unsqueeze(dense_wx, axis=2) # [batchsize,dense_feature_dim,1]
+ dense_wx = paddle.multiply(
+ dense_feature,
+ self.dense_feature_oneOrderWeight) # [batchsize,dense_feature_dim]
+ dense_wx = paddle.unsqueeze(
+ dense_wx, axis=2) # [batchsize,dense_feature_dim,1]
- y_pred_first_order = paddle.sum(sparse_wx, axis=1) + paddle.sum(dense_wx,
- axis=1) # [batchsize,dense_feature_dim,1]---> [batchsize,1]
+ y_pred_first_order = paddle.sum(sparse_wx, axis=1) + paddle.sum(
+ dense_wx,
+ axis=1) # [batchsize,dense_feature_dim,1]---> [batchsize,1]
        # Second-order feature interactions
'''
@@ -99,7 +91,8 @@ def forward(self, sparse_feature, dense_feature):
        half of (the square of the sum of vi,j * xi minus the sum of the squares of vi,j * xi)
'''
        # Sparse-feature lookup: vij*xi <- vij*1
- sparse_vx = self.sparse_latent_vecs(sparse_feature) # [batchsize,sparse_field_num,embed_dim]
+ sparse_vx = self.sparse_latent_vecs(
+ sparse_feature) # [batchsize,sparse_field_num,embed_dim]
'''
        Dense-feature broadcast multiplication:
@@ -107,19 +100,28 @@ def forward(self, sparse_feature, dense_feature):
dense_latent_vecs:[1,dense_fea_dim,embed_dim]
        vij*xi <- broadcast element-wise multiply(dense_fea, dense_latent_vecs) #[batchsize,dense_fea_dim,embed_dim]
'''
- dense_x = paddle.unsqueeze(dense_feature, axis=2) # [batchsize,dense_fea_dim]->[batchsize,dense_fea_dim,1]
- dense_vx = paddle.multiply(dense_x, self.dense_latent_vecs) # [batchsize,dense_fea_dim,embed_dim]
-
- concat_vx = paddle.concat([sparse_vx, dense_vx], axis=1) # [batchsize,sparse_field_num+dense_fea_dim,embed_dim]
+ dense_x = paddle.unsqueeze(
+ dense_feature,
+ axis=2) # [batchsize,dense_fea_dim]->[batchsize,dense_fea_dim,1]
+ dense_vx = paddle.multiply(
+ dense_x,
+ self.dense_latent_vecs) # [batchsize,dense_fea_dim,embed_dim]
+
+ concat_vx = paddle.concat(
+ [sparse_vx, dense_vx],
+ axis=1) # [batchsize,sparse_field_num+dense_fea_dim,embed_dim]
embedding = concat_vx
        # Sum of the squares
- concat_vx_square = paddle.square(concat_vx) # [batchsize,sparse_field_num+dense_fea_dim,embed_dim]
- square_sum = paddle.sum(concat_vx_square, axis=1) # [batchsize,embed_dim]
+ concat_vx_square = paddle.square(
+ concat_vx) # [batchsize,sparse_field_num+dense_fea_dim,embed_dim]
+ square_sum = paddle.sum(concat_vx_square,
+ axis=1) # [batchsize,embed_dim]
        # Square of the sum
concat_vx_sum = paddle.sum(concat_vx, axis=1) # [batchsize,embed_dim]
sum_square = paddle.square(concat_vx_sum) # [batchsize,embed_dim]
- y_pred_second_order = 0.5 * (paddle.sum(sum_square - square_sum, axis=1)) # [batchsize,1]
+ y_pred_second_order = 0.5 * (
+ paddle.sum(sum_square - square_sum, axis=1)) # [batchsize,1]
y_pred_second_order = paddle.unsqueeze(y_pred_second_order, axis=1)
return y_pred_first_order, y_pred_second_order, embedding
@@ -158,4 +160,4 @@ def forward(self, feat_embeddings):
[-1, self.num_field * self.sparse_feature_dim])
for n_layer in self._mlp_layers:
y_dnn = n_layer(y_dnn)
- return y_dnn
\ No newline at end of file
+ return y_dnn
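The second-order block above relies on the standard FM identity: the sum of all pairwise interactions equals half of "square of the sum minus sum of the squares", which cuts the cost from O(k n^2) to O(k n) per sample. A quick NumPy check of that identity on synthetic values (illustrative only):

```python
# Numeric check of the FM trick used in forward():
# sum_{i<j} <v_i x_i, v_j x_j>
#   == 0.5 * (||sum_i v_i x_i||^2 - sum_i ||v_i x_i||^2)
import numpy as np

rng = np.random.default_rng(0)
vx = rng.normal(size=(5, 3))  # v_i * x_i for 5 fields, embed_dim = 3

brute = sum(vx[i] @ vx[j] for i in range(5) for j in range(i + 1, 5))
fast = 0.5 * (np.square(vx.sum(axis=0)).sum() - np.square(vx).sum())
assert np.isclose(brute, fast)
```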
diff --git a/examples/DeepFM for CTR Prediction/Predata.py b/examples/DeepFM for CTR Prediction/Predata.py
old mode 100644
new mode 100755
index 4600ab5fa..dcfee8383
--- a/examples/DeepFM for CTR Prediction/Predata.py
+++ b/examples/DeepFM for CTR Prediction/Predata.py
@@ -3,14 +3,18 @@
# !cd slot_test_data_full/ && ls -lha && head part-220
import os
import subprocess
+
+
def wc_count(file_name):
out = subprocess.getoutput("wc -l %s" % file_name)
return int(out.split()[0])
-def wc_count_dir(dirPath):#统计最终一共有多少行数据
- cnt=0
- fileList=os.listdir(dirPath)
+
+
+def wc_count_dir(dirPath):  # Count how many lines of data there are in total
+ cnt = 0
+ fileList = os.listdir(dirPath)
for fileName in fileList:
- cnt+=wc_count(dirPath.strip('/')+'/'+fileName)
+ cnt += wc_count(dirPath.strip('/') + '/' + fileName)
return cnt
@@ -47,7 +51,9 @@ def predata(rawLine):
        elif fea not in output.keys():  # the dense feature is missing entirely
            output[fea] = [padding] * dense_fea_dim  # the dense feature is partially missing
elif len(output[fea]) < dense_fea_dim:
- output[fea].extend([padding] * (dense_fea_dim - len(output[fea])))
+ output[fea].extend([padding] *
+ (dense_fea_dim - len(output[fea])))
data = []
- for fea in slots_fea: data.extend(output[fea])
- return data
\ No newline at end of file
+ for fea in slots_fea:
+ data.extend(output[fea])
+ return data
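`wc_count` shells out to the `wc -l` binary; where that is unavailable (for example on Windows), a pure-Python equivalent is a few lines. This fallback is a sketch, not part of the original script:

```python
# Portable fallback for wc_count: count lines without a subprocess.
def count_lines(path):
    with open(path, 'rb') as f:
        return sum(1 for _ in f)
```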
diff --git a/examples/DeepFM for CTR Prediction/Predict.py b/examples/DeepFM for CTR Prediction/Predict.py
old mode 100644
new mode 100755
index bf2dc5fb8..5fd29e465
--- a/examples/DeepFM for CTR Prediction/Predict.py
+++ b/examples/DeepFM for CTR Prediction/Predict.py
@@ -17,7 +17,8 @@ def predict(deepFM_model, deepFM_Dataset, batchnum):
sparse_feature = paddle.to_tensor(data[1], dtype='int64')
dense_feature = paddle.to_tensor(data[2], dtype='float32')
        # Get the predictions; to obtain each sample's probability of the positive and the negative class, concatenate the prediction with 1-predict into predicts for the AUC computation below
- predicts1 = deepFM_model(sparse_feature, dense_feature) # [batchsize,1]
+ predicts1 = deepFM_model(sparse_feature,
+ dense_feature) # [batchsize,1]
predicts0 = 1 - predicts1 # [batchsize,1]
predicts = paddle.concat([predicts0, predicts1], axis=1)
# 计算auc
@@ -26,8 +27,11 @@ def predict(deepFM_model, deepFM_Dataset, batchnum):
loss = F.binary_cross_entropy(predicts1, label_data)
if batchidx % (batchnum // 20) == 0:
- print(paddle.concat([predicts[:4, ], label_data[:4, ]], axis=1).numpy())
- print("batchidx:{} loss:{} auc:{}".format(batchidx, loss.numpy(), auc.accumulate()))
+ print(
+ paddle.concat(
+ [predicts[:4, ], label_data[:4, ]], axis=1).numpy())
+ print("batchidx:{} loss:{} auc:{}".format(
+ batchidx, loss.numpy(), auc.accumulate()))
-predict(testDeepFM_model, deepFM_TestDataset, testBatchNum)
\ No newline at end of file
+predict(testDeepFM_model, deepFM_TestDataset, testBatchNum)
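The concat step exists because `paddle.metric.Auc` consumes per-class probabilities, while the model emits only p(y=1). A toy batch showing the expected shapes (the values are illustrative):

```python
# Why predicts0 and predicts1 are concatenated: Auc.update expects
# [p(y=0), p(y=1)] per sample, i.e. a [batchsize, 2] prediction matrix.
import paddle

p1 = paddle.to_tensor([[0.9], [0.2]])        # model output, p(y=1)
preds = paddle.concat([1 - p1, p1], axis=1)  # [batchsize, 2]
labels = paddle.to_tensor([[1], [0]])

auc = paddle.metric.Auc()
auc.update(preds=preds, labels=labels)
print(auc.accumulate())  # 1.0 here: the positive sample is ranked first
```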
diff --git a/examples/DeepFM for CTR Prediction/README.md b/examples/DeepFM for CTR Prediction/README.md
old mode 100644
new mode 100755
index c050516c9..dea3d1af2
--- a/examples/DeepFM for CTR Prediction/README.md
+++ b/examples/DeepFM for CTR Prediction/README.md
@@ -71,7 +71,7 @@ DNN深度神经网络层结构如下图所示:
The 26 categorical sparse features look like the figure below:

-
+
```python
# Inspect the data format
@@ -118,7 +118,7 @@ def predata(rawLine):
else:continue
        if fea not in output.keys():output[fea]=[val]#dense feature absent, add a new key
        else:output[fea].append(val)#append at the end
-
+
    #padding
if len(output.keys()) != slots:
for fea in slots_fea:
@@ -149,7 +149,7 @@ class DeepFM_Dataset():
batchData=[]
cnt=0
        #read line by line from all the files
- for dataFile in self.dataFiles:
+ for dataFile in self.dataFiles:
with open(self.dataFileDir.strip('/')+'/'+dataFile,'r') as lines:
for line in lines:
batchData.append(predata(line))
@@ -165,7 +165,7 @@ class DeepFM_Dataset():
batchData=[]
def getNextBatchData(self):
- return next(self.next_batch_reader)# [[label,sparse fea,dense fea],...] shape:[batchsize,1+26+13]
+ return next(self.next_batch_reader)# [[label,sparse fea,dense fea],...] shape:[batchsize,1+26+13]
```
@@ -216,17 +216,17 @@ class DeepFMLayer(nn.Layer):
class FM(nn.Layer):
    #FM layer, responsible for extracting the low-order features
def __init__(self,
- sparse_feature_number = 1000001,
+ sparse_feature_number = 1000001,
sparse_feature_dim = 9,
dense_feature_dim = 13,
sparse_num_field = 26):
super(FM, self).__init__()
- self.sparse_feature_number = sparse_feature_number # 1000001
+ self.sparse_feature_number = sparse_feature_number # 1000001
self.sparse_feature_dim = sparse_feature_dim# 9
self.dense_feature_dim = dense_feature_dim#13
self.sparse_num_field = sparse_num_field# sparse_inputs_slots-1==>26
self.layer_sizes = layer_sizes# fc_sizes: [512, 256, 128, 32]
-
+
        # First-order sparse features
self.sparse_feature_oneOrderWeight=paddle.nn.Embedding(
sparse_feature_number,
@@ -252,7 +252,7 @@ class FM(nn.Layer):
)
def forward(self,sparse_feature,dense_feature):
- # 一阶特征
+        # First-order features
'''
        Compute the first-order term: y_1order = 0 + w*x
@@ -267,25 +267,25 @@ class FM(nn.Layer):
dense_wx=paddle.unsqueeze(dense_wx, axis=2)# [batchsize,dense_feature_dim,1]
y_pred_first_order=paddle.sum(sparse_wx,axis=1)+paddle.sum(dense_wx,axis=1)# [batchsize,dense_feature_dim,1]---> [batchsize,1]
-
+
        # Second-order feature interactions
        '''
        y_2order=\sum{xi xj}
        After optimization the formula becomes:
-        vi,j * xi的平方和 减去 vi,j * vi 的和的平方,再取1/2
+        half of (the square of the sum of vi,j * xi minus the sum of the squares of vi,j * xi)
        '''
        #Sparse-feature lookup: vij*xi <- vij*1
sparse_vx= self.sparse_latent_vecs(sparse_feature) # [batchsize,sparse_field_num,embed_dim]
'''
        Dense-feature broadcast multiplication:
-
+
dense_fea: [batchsize,dense_fea_dim,1]
dense_latent_vecs:[1,dense_fea_dim,embed_dim]
        vij*xi <- broadcast element-wise multiply(dense_fea,dense_latent_vecs) #[batchsize,dense_fea_dim,embed_dim]
'''
dense_x=paddle.unsqueeze(dense_feature,axis=2) # [batchsize,dense_fea_dim]->[batchsize,dense_fea_dim,1]
dense_vx=paddle.multiply(dense_x,self.dense_latent_vecs)#[batchsize,dense_fea_dim,embed_dim]
-
+
concat_vx=paddle.concat([sparse_vx,dense_vx],axis=1)#[batchsize,sparse_field_num+dense_fea_dim,embed_dim]
embedding=concat_vx
        #sum of the squares
@@ -398,7 +398,7 @@ def train(
print("processing:{}%".format(100*batchidx/batchnum))
print("label data 0-num: {0} 1-num:{1}".format( np.sum(data[0]<0.5),np.sum(data[0]>0.5) ) )
print("epoch: {}, batch_id: {}, loss : {}, auc: {}".format(epoch, batchidx, loss.numpy(),auc.accumulate()))
-
+
adam.step()
adam.clear_grad()
@@ -522,7 +522,7 @@ def predict(deepFM_model,deepFM_Dataset,batchnum):
auc = paddle.metric.Auc()
auc.update(preds=predicts,labels=label_data)
loss = F.binary_cross_entropy(predicts1, label_data)
-
+
if batchidx % (batchnum//20)==0:
print(paddle.concat([predicts[:4,],label_data[:4,]],axis=1).numpy())
print("batchidx:{} loss:{} auc:{}".format(batchidx,loss.numpy(),auc.accumulate()))
@@ -538,4 +538,4 @@ predict(testDeepFM_model,deepFM_TestDataset,testBatchNum)
| Model | auc | loss | batch_size | epoch_num | Training time |
| ------ | ---- | ---- | ---------- | --------- | -------- |
-| DeepFM | 0.74 | 0.47 | 2000 | 1 | 1.5小时 |
\ No newline at end of file
+| DeepFM | 0.74 | 0.47 | 2000 | 1 | 1.5 h |
diff --git a/examples/DeepFM for CTR Prediction/Save.py b/examples/DeepFM for CTR Prediction/Save.py
old mode 100644
new mode 100755
index 3f5c1411f..648dd9d05
--- a/examples/DeepFM for CTR Prediction/Save.py
+++ b/examples/DeepFM for CTR Prediction/Save.py
@@ -7,9 +7,15 @@
layer_state_dict = paddle.load("./model/deepFM_model.pdparams")
opt_state_dict = paddle.load("./model/adam.pdopt")
-testDeepFM_model=DeepFMLayer(sparse_feature_number = 1000001, sparse_feature_dim = 9,
- dense_feature_dim = 13, sparse_num_field = 26, layer_sizes = [512, 256, 128, 32])
-testAdam = paddle.optimizer.Adam(learning_rate=learning_rate, parameters=testDeepFM_model.parameters())# Adam优化器
+testDeepFM_model = DeepFMLayer(
+ sparse_feature_number=1000001,
+ sparse_feature_dim=9,
+ dense_feature_dim=13,
+ sparse_num_field=26,
+ layer_sizes=[512, 256, 128, 32])
+testAdam = paddle.optimizer.Adam(
+ learning_rate=learning_rate,
+    parameters=testDeepFM_model.parameters())  # Adam optimizer
testDeepFM_model.set_state_dict(layer_state_dict)
-testAdam.set_state_dict(opt_state_dict)
\ No newline at end of file
+testAdam.set_state_dict(opt_state_dict)
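Save.py restores both the layer and the optimizer from separate state dicts. The full round trip, reduced to a toy layer (file paths are placeholders):

```python
# Checkpoint round trip matching Save.py: parameters (.pdparams) and
# optimizer state (.pdopt) are saved and restored independently.
import paddle

model = paddle.nn.Linear(4, 1)
opt = paddle.optimizer.Adam(learning_rate=1e-3, parameters=model.parameters())

paddle.save(model.state_dict(), 'demo.pdparams')
paddle.save(opt.state_dict(), 'demo.pdopt')

model.set_state_dict(paddle.load('demo.pdparams'))
opt.set_state_dict(paddle.load('demo.pdopt'))
```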
diff --git a/examples/DeepFM for CTR Prediction/Train.py b/examples/DeepFM for CTR Prediction/Train.py
old mode 100644
new mode 100755
index 68226003e..bc4b32500
--- a/examples/DeepFM for CTR Prediction/Train.py
+++ b/examples/DeepFM for CTR Prediction/Train.py
@@ -5,44 +5,44 @@
import numpy as np
#Model hyperparameters
-sparse_feature_number = 1000001 # 1000001 离散特征数
-embedding_dim = 9# 9 嵌入层维度
-dense_feature_dim = 13#13 稠密特征维度
-sparse_num_field = 26# sparse_inputs_slots-1==>26 稀疏特征维度
-layer_sizes = [512, 256, 128, 32]# fc_sizes: [512, 256, 128, 32] 隐藏层数量
+sparse_feature_number = 1000001  # 1000001 number of distinct sparse feature ids
+embedding_dim = 9  # 9 embedding dimension
+dense_feature_dim = 13  # 13 number of dense features
+sparse_num_field = 26  # sparse_inputs_slots-1==>26 number of sparse fields
+layer_sizes = [512, 256, 128, 32]  # fc_sizes: [512, 256, 128, 32] hidden layer sizes
#Training hyperparameters
epochs = 2
-batchsize=50
-learning_rate=1e-3
-
+batchsize = 50
+learning_rate = 1e-3
-def train(
- deepFM_model,
- deepFM_Dataset,
- batchnum,
- optimizer,
- sparse_feature_number=1000001,
- embedding_dim=9,
- dense_feature_dim=13,
- sparse_num_field=26,
- layer_sizes=[512, 256, 128, 32],
- epochs=1,
- batchsize=500,
- learning_rate=1e-3):
+def train(deepFM_model,
+ deepFM_Dataset,
+ batchnum,
+ optimizer,
+ sparse_feature_number=1000001,
+ embedding_dim=9,
+ dense_feature_dim=13,
+ sparse_num_field=26,
+ layer_sizes=[512, 256, 128, 32],
+ epochs=1,
+ batchsize=500,
+ learning_rate=1e-3):
lossFunc = F.binary_cross_entropy
for epoch in range(epochs):
for batchidx in range(batchnum):
            # Load a batch of training data
data = deepFM_Dataset.getNextBatchData()
- label_data = paddle.to_tensor(data[0], dtype='float32') # [batchsize,]
+ label_data = paddle.to_tensor(
+ data[0], dtype='float32') # [batchsize,]
label_data = paddle.unsqueeze(label_data, axis=1) # [batchsize,1]
            # Get the sparse / dense features
sparse_feature = paddle.to_tensor(data[1], dtype='int64')
dense_feature = paddle.to_tensor(data[2], dtype='float32')
            # Get the predictions; to obtain each sample's probability of the positive and the negative class, concatenate the prediction with 1-predict into predicts for the AUC computation below
- predicts1 = deepFM_model(sparse_feature, dense_feature) # [batchsize,1]
+ predicts1 = deepFM_model(sparse_feature,
+ dense_feature) # [batchsize,1]
predicts0 = 1 - predicts1 # [batchsize,1]
predicts = paddle.concat([predicts0, predicts1], axis=1)
            # Compute the AUC metric
@@ -52,9 +52,10 @@ def train(
loss.backward()
if batchidx % (batchnum // 220) == 0:
print("processing:{}%".format(100 * batchidx / batchnum))
- print("label data 0-num: {0} 1-num:{1}".format(np.sum(data[0] < 0.5), np.sum(data[0] > 0.5)))
- print("epoch: {}, batch_id: {}, loss : {}, auc: {}".format(epoch, batchidx, loss.numpy(),
- auc.accumulate()))
+ print("label data 0-num: {0} 1-num:{1}".format(
+ np.sum(data[0] < 0.5), np.sum(data[0] > 0.5)))
+ print("epoch: {}, batch_id: {}, loss : {}, auc: {}".format(
+ epoch, batchidx, loss.numpy(), auc.accumulate()))
adam.step()
adam.clear_grad()
@@ -69,8 +70,17 @@ def train(
trainBatchNum = trainFilesLineNum // batchsize
deepFM_TrainDataset = DeepFM_Dataset(batchsize, trainFilePath)
-deepFM_model = DeepFMLayer(sparse_feature_number, embedding_dim, dense_feature_dim, sparse_num_field, layer_sizes)
-adam = paddle.optimizer.Adam(learning_rate=learning_rate, parameters=deepFM_model.parameters()) # Adam优化器
+deepFM_model = DeepFMLayer(sparse_feature_number, embedding_dim,
+ dense_feature_dim, sparse_num_field, layer_sizes)
+adam = paddle.optimizer.Adam(
+ learning_rate=learning_rate,
+    parameters=deepFM_model.parameters())  # Adam optimizer
-train(deepFM_model, deepFM_TrainDataset, epochs=epochs, batchsize=batchsize, batchnum=trainBatchNum,
- learning_rate=learning_rate, optimizer=adam)
\ No newline at end of file
+train(
+ deepFM_model,
+ deepFM_TrainDataset,
+ epochs=epochs,
+ batchsize=batchsize,
+ batchnum=trainBatchNum,
+ learning_rate=learning_rate,
+ optimizer=adam)
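The heart of `train` is the usual dygraph update cycle: forward, binary cross entropy on probabilities, backward, `step`, `clear_grad`. Isolated below as a runnable toy (random data and a minimal sigmoid network, purely for illustration):

```python
# One optimization step as performed inside train(): note that
# F.binary_cross_entropy expects probabilities, hence the Sigmoid.
import paddle
import paddle.nn.functional as F

x = paddle.rand([8, 13])
y = paddle.randint(0, 2, [8, 1]).astype('float32')
net = paddle.nn.Sequential(paddle.nn.Linear(13, 1), paddle.nn.Sigmoid())
adam = paddle.optimizer.Adam(learning_rate=1e-3, parameters=net.parameters())

loss = F.binary_cross_entropy(net(x), y)
loss.backward()
adam.step()
adam.clear_grad()
```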
diff --git a/examples/DeepFM for CTR Prediction/Visualization.py b/examples/DeepFM for CTR Prediction/Visualization.py
old mode 100644
new mode 100755
index d0449bd85..366bd103e
--- a/examples/DeepFM for CTR Prediction/Visualization.py
+++ b/examples/DeepFM for CTR Prediction/Visualization.py
@@ -2,7 +2,7 @@
import matplotlib.pyplot as plt
import xlrd
#Choose the Chinese font SimHei for matplotlib
-plt.rcParams['font.sans-serif']=['SimHei']
+plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Open a workbook
@@ -19,10 +19,9 @@
loss = mySheet.col_values(1)
print(loss)
time = mySheet.col(0)
-print('time1',time)
+print('time1', time)
time = [x.value for x in time]
-print('time2',time)
-
+print('time2', time)
#Drop the header row
loss.pop(0)
@@ -32,9 +31,9 @@
fig = plt.figure(1)
# plot loss
-plt.plot(time,loss)
+plt.plot(time, loss)
plt.title('损失度loss随训练完成度变化曲线')
plt.ylabel('loss')
-plt.xticks(range(0,1))
-plt.show()
\ No newline at end of file
+plt.xticks(range(0, 1))
+plt.show()
diff --git a/examples/USTC/homework.md b/examples/USTC/homework.md
old mode 100644
new mode 100755
index 81e93f26b..dfbbc56c2
--- a/examples/USTC/homework.md
+++ b/examples/USTC/homework.md
@@ -15,14 +15,14 @@ https://github.com/PaddlePaddle/PaddleClas
Requirements:
-实训作业分为理论讲解和代码实现两部分,模型理论要求对实训作业中使用的模型进行详细的解释,图文并茂为佳。
+The assignment has two parts, a theory write-up and a code implementation; the theory part must explain the models used in the assignment in detail, ideally with both text and figures.
The code implementation should be written up in the following order:
-1. 实验设计逻辑:解释任务,说明实验设计逻辑
-2. 数据处理:解释数据集,处理数据为模型输入格式
-3. 模型设计:根据任务设计模型,需要给出模型设计图
-4. 训练配置:定义模型训练的超参数,模型实例化,指定训练的cpu或gpu资 源,定义优化器等等
-5. 模型训练与评估:训练模型,在训练过程中,根据开发集适时打印结果
-6. 模型推理:设计一个接口函数,通过这个接口函数能够方便地对任意一个样本进行实时预测
+1. Experiment design: explain the task and the logic of the experiment design
+2. Data processing: describe the dataset and convert the data into the model's input format
+3. Model design: design a model for the task; a model architecture diagram is required
+4. Training configuration: define the training hyperparameters, instantiate the model, specify the CPU or GPU resources used for training, define the optimizer, and so on
+5. Training and evaluation: train the model, printing results on the development set at appropriate intervals during training
+6. Inference: design an interface function through which any single sample can be conveniently predicted in real time
Special tasks such as reinforcement learning may omit the data processing and inference parts. For a reference write-up, see: https://aistudio.baidu.com/aistudio/projectdetail/2023570
@@ -36,5 +36,3 @@ https://github.com/PaddlePaddle/awesome-DeepLearning/tree/develop/examples
提交pr流程:
https://github.com/PaddlePaddle/awesome-DeepLearning/blob/develop/examples/awesome-DeepLearning_pr_procedure.md
-
-
diff --git a/examples/awesome-DeepLearning_pr_procedure.md b/examples/awesome-DeepLearning_pr_procedure.md
old mode 100644
new mode 100755
diff --git a/examples/homework.md b/examples/homework.md
old mode 100644
new mode 100755
index 8a791cb2c..7e020e4e5
--- a/examples/homework.md
+++ b/examples/homework.md
@@ -1,28 +1,28 @@
[Assignments]
-1. 使用CIFAR10数据集,基于EffNet网络实现图像分类。
-2. 使用CIFAR10数据集,基于DarkNet网络实现图像分类。
-3. 在眼疾识别数据集上训练SENet网络。
-4. 在眼疾识别数据集上训练SqueezeNet网络。
-5. 在眼疾识别数据集上训练DPN网络。
-6. 使用THUCNews标题数据集,基于textcnn网络实现文本分类。
-7. 基于LSTM网络训练一个语言模型,并尝试用于下一个词预测任务进行效果验证。
-8. 使用LCQMC数据集,基于LSTM网络训练一个文本匹配模型。
-9. 使用ChnSentiCorp数据集,基于GRU网络完成情感分析模型。
-10. 手动实现LSTM模型,并尝试利用IMDB数据集进行情感分析。
+1. Image classification on the CIFAR10 dataset with an EffNet network.
+2. Image classification on the CIFAR10 dataset with a DarkNet network.
+3. Train a SENet network on the ocular disease recognition dataset.
+4. Train a SqueezeNet network on the ocular disease recognition dataset.
+5. Train a DPN network on the ocular disease recognition dataset.
+6. Text classification on the THUCNews headline dataset with a textcnn network.
+7. Train an LSTM-based language model and validate it on next-word prediction.
+8. Train a text matching model on the LCQMC dataset with an LSTM network.
+9. Build a sentiment analysis model on the ChnSentiCorp dataset with a GRU network.
+10. Implement an LSTM model by hand and try sentiment analysis on the IMDB dataset.
Reference link: https://aistudio.baidu.com/aistudio/education/group/info/1297
https://github.com/PaddlePaddle/PaddleClas
Requirements:
-实训作业分为理论讲解和代码实现两部分,模型理论要求对实训作业中使用的模型进行详细的解释,图文并茂为佳。
+The assignment has two parts, a theory write-up and a code implementation; the theory part must explain the models used in the assignment in detail, ideally with both text and figures.
The code implementation should be written up in the following order:
-1. 实验设计逻辑:解释任务,说明实验设计逻辑
-2. 数据处理:解释数据集,处理数据为模型输入格式
-3. 模型设计:根据任务设计模型,需要给出模型设计图
-4. 训练配置:定义模型训练的超参数,模型实例化,指定训练的cpu或gpu资 源,定义优化器等等
-5. 模型训练与评估:训练模型,在训练过程中,根据开发集适时打印结果
-6. 模型推理:设计一个接口函数,通过这个接口函数能够方便地对任意一个样本进行实时预测
+1. Experiment design: explain the task and the logic of the experiment design
+2. Data processing: describe the dataset and convert the data into the model's input format
+3. Model design: design a model for the task; a model architecture diagram is required
+4. Training configuration: define the training hyperparameters, instantiate the model, specify the CPU or GPU resources used for training, define the optimizer, and so on
+5. Training and evaluation: train the model, printing results on the development set at appropriate intervals during training
+6. Inference: design an interface function through which any single sample can be conveniently predicted in real time
Special tasks such as reinforcement learning may omit the data processing and inference parts. For a reference write-up, see: https://aistudio.baidu.com/aistudio/projectdetail/2023570
@@ -36,5 +36,3 @@ https://github.com/PaddlePaddle/awesome-DeepLearning/tree/master/examples
提交pr流程:
https://github.com/PaddlePaddle/awesome-DeepLearning/blob/master/examples/awesome-DeepLearning_pr_procedure.md
-
-
diff --git a/examples/recruit_sig.pdf b/examples/recruit_sig.pdf
old mode 100644
new mode 100755
diff --git a/examples/template/README.md b/examples/template/README.md
old mode 100644
new mode 100755
index 312cbf882..7726d1289
--- a/examples/template/README.md
+++ b/examples/template/README.md
@@ -6,13 +6,13 @@
Assignments cover deep learning fundamentals and code practice, for example in the following form:
-### 1. 深度学习基础知识
+### 1. Deep learning fundamentals
+ CNN-DSSM knowledge supplement: add notes on this DSSM variant, covering concept, model, purpose, scenarios, pros and cons, etc. (10 points)
+ LSTM-DSSM knowledge supplement: add notes on this DSSM variant, covering concept, model, purpose, scenarios, pros and cons, etc.
-+ MMoE多任务学习知识点补充:补充主流的推荐模型MMoE模型的知识点,主要包括:概念,模型,作用,场景,优缺点等。
-+ ShareBottom多任务学习知识点补充:补充经典的多任务学习ShareBottom模型的知识点,主要包括:概念,模型,作用,场景,优缺点等。
-+ YouTube深度学习视频推荐系统知识点:补充视频推荐的经典架构知识点,主要包括:概念,流程,原理,作用,优缺点等。
++ MMoE multi-task learning supplement: add notes on the mainstream MMoE recommendation model, covering concept, model, purpose, scenarios, pros and cons, etc.
++ ShareBottom multi-task learning supplement: add notes on the classic ShareBottom multi-task model, covering concept, model, purpose, scenarios, pros and cons, etc.
++ YouTube deep learning video recommendation supplement: add notes on this classic video recommendation architecture, covering concept, pipeline, principles, purpose, pros and cons, etc.
### 2. Code practice
@@ -38,7 +38,7 @@
Explain the role and function of every python file; train.py, predict.py, the knowledge-point md file and the training log training.log are mandatory, for example with the structure below:
```
-|-data_process.py: 数据预处理
+|-data_process.py: data preprocessing
|-dataloader.py: utility functions for building the dataloader
|-model.py: definition of the model
|-train.py: training script; training writes logs and so on
@@ -69,6 +69,3 @@ python train.py
```
python predict.py
```
-
-
-
diff --git a/examples/template/data_process.py b/examples/template/data_process.py
old mode 100644
new mode 100755
diff --git a/examples/template/evaluation.py b/examples/template/evaluation.py
old mode 100644
new mode 100755
diff --git a/examples/template/example.ipynb b/examples/template/example.ipynb
old mode 100644
new mode 100755
diff --git a/examples/template/example.md b/examples/template/example.md
old mode 100644
new mode 100755
index db704be87..4664342b0
--- a/examples/template/example.md
+++ b/examples/template/example.md
@@ -3,4 +3,3 @@
## 二级标题

-
diff --git a/examples/template/images/demo.png b/examples/template/images/demo.png
old mode 100644
new mode 100755
diff --git a/examples/template/predict.py b/examples/template/predict.py
old mode 100644
new mode 100755
diff --git a/examples/template/train.py b/examples/template/train.py
old mode 100644
new mode 100755
diff --git "a/examples/\344\275\234\344\270\232\346\217\220\344\272\244\350\257\246\346\203\205.md" "b/examples/\344\275\234\344\270\232\346\217\220\344\272\244\350\257\246\346\203\205.md"
old mode 100644
new mode 100755
index 54a0e0613..b3672013e
--- "a/examples/\344\275\234\344\270\232\346\217\220\344\272\244\350\257\246\346\203\205.md"
+++ "b/examples/\344\275\234\344\270\232\346\217\220\344\272\244\350\257\246\346\203\205.md"
@@ -1,6 +1,6 @@
# Assignment content
-* 知识点原理编写题*5(共计50分)
+* Knowledge-point write-up questions x5 (50 points in total)
Example: https://github.com/PaddlePaddle/awesome-DeepLearning/blob/master/docs/tutorials/computer_vision/classification/DarkNet.md
@@ -12,7 +12,7 @@
* Submit a PR to the awesome-DeepLearning repo
-* 在助教作业收集表格中,填写自己提交pr的链接
+* Fill in the link to your PR in the TA's assignment collection sheet
# Review process
@@ -23,4 +23,3 @@
3. PRs are scored and ranked; the top 20 submitters graduate and receive an official PaddlePaddle completion certificate;
4. Outstanding PRs will be merged into the official PaddlePaddle repo, with a fast-track hiring channel opened for their authors.
-
diff --git a/junior_class/.README.md.swp b/junior_class/.README.md.swp
old mode 100644
new mode 100755
diff --git a/junior_class/README.md b/junior_class/README.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-1-hands_on_deep_learning/README.md b/junior_class/chapter-1-hands_on_deep_learning/README.md
old mode 100644
new mode 100755
index 872e827c2..7e0389ad2
--- a/junior_class/chapter-1-hands_on_deep_learning/README.md
+++ b/junior_class/chapter-1-hands_on_deep_learning/README.md
@@ -7,4 +7,4 @@
## **Case structure**
The cases in this chapter are presented as notebook and code:
- The notebook provides this chapter's tutorial with complete text explanations; for a better reading experience you can also visit the [notebook document](https://aistudio.baidu.com/aistudio/education/teacher/management/1297/teaching) on the AIStudio platform.
-- code 部分给大家提供了本章内容的完整学习代码,具体使用教程参考 code 部分的[README](./code/README.md)。
\ No newline at end of file
+- The code section provides the complete learning code for this chapter; for usage instructions see the [README](./code/README.md) in the code section.
diff --git a/junior_class/chapter-1-hands_on_deep_learning/README_en.md b/junior_class/chapter-1-hands_on_deep_learning/README_en.md
old mode 100644
new mode 100755
index e3b8359a9..ce70a7bc8
--- a/junior_class/chapter-1-hands_on_deep_learning/README_en.md
+++ b/junior_class/chapter-1-hands_on_deep_learning/README_en.md
@@ -9,4 +9,4 @@ This chapter takes you to get started quickly with deep learning, including an o
This chapter is presented in the form of notebook and code:
- The notebook provides the learning tutorial of this chapter, with complete text description. In order to have a better reading method, you can also visit the [notebook document](https://aistudio.baidu.com/aistudio/education/group/info/1297/content) on the AIStudio platform.
-- The code section provides a complete learning code. For the specific usage tutorial, please refer to the [README](./code/README_en.md) in the code section.
\ No newline at end of file
+- The code section provides a complete learning code. For the specific usage tutorial, please refer to the [README](./code/README_en.md) in the code section.
diff --git a/junior_class/chapter-1-hands_on_deep_learning/code/1-2-build_neural_network_using_numpy.py b/junior_class/chapter-1-hands_on_deep_learning/code/1-2-build_neural_network_using_numpy.py
old mode 100644
new mode 100755
index 0029e9866..dfb69b1bb
--- a/junior_class/chapter-1-hands_on_deep_learning/code/1-2-build_neural_network_using_numpy.py
+++ b/junior_class/chapter-1-hands_on_deep_learning/code/1-2-build_neural_network_using_numpy.py
@@ -20,6 +20,7 @@
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
+
def load_data():
    # Load the data from file
datafile = './work/housing.data'
@@ -41,7 +42,8 @@ def load_data():
training_data = data[:offset]
    # Compute the maximum, minimum and mean of the training set
- maximums, minimums, avgs = training_data.max(axis=0), training_data.min(axis=0), training_data.sum(axis=0) / training_data.shape[0]
+ maximums, minimums, avgs = training_data.max(axis=0), training_data.min(
+ axis=0), training_data.sum(axis=0) / training_data.shape[0]
    # Normalize the data
for i in range(feature_num):
@@ -53,6 +55,7 @@ def load_data():
test_data = data[offset:]
return training_data, test_data
+
class Network(object):
def __init__(self, num_of_weights):
        # Randomly initialize w
@@ -60,31 +63,30 @@ def __init__(self, num_of_weights):
#np.random.seed(0)
self.w = np.random.randn(num_of_weights, 1)
self.b = 0.
-
+
def forward(self, x):
z = np.dot(x, self.w) + self.b
return z
-
+
def loss(self, z, y):
error = z - y
num_samples = error.shape[0]
cost = error * error
cost = np.sum(cost) / num_samples
return cost
-
+
def gradient(self, x, y):
z = self.forward(x)
N = x.shape[0]
- gradient_w = 1. / N * np.sum((z-y) * x, axis=0)
+ gradient_w = 1. / N * np.sum((z - y) * x, axis=0)
gradient_w = gradient_w[:, np.newaxis]
- gradient_b = 1. / N * np.sum(z-y)
+ gradient_b = 1. / N * np.sum(z - y)
return gradient_w, gradient_b
-
- def update(self, gradient_w, gradient_b, eta = 0.01):
+
+ def update(self, gradient_w, gradient_b, eta=0.01):
self.w = self.w - eta * gradient_w
self.b = self.b - eta * gradient_b
-
-
+
def train(self, training_data, num_epochs, batch_size=10, eta=0.01):
n = len(training_data)
losses = []
@@ -93,7 +95,10 @@ def train(self, training_data, num_epochs, batch_size=10, eta=0.01):
            # and then draw the data batch_size examples at a time
np.random.shuffle(training_data)
            # Split the training data into mini_batches of batch_size examples each
- mini_batches = [training_data[k:k+batch_size] for k in range(0, n, batch_size)]
+ mini_batches = [
+ training_data[k:k + batch_size]
+ for k in range(0, n, batch_size)
+ ]
for iter_id, mini_batch in enumerate(mini_batches):
#print(self.w.shape)
#print(self.b)
@@ -104,10 +109,12 @@ def train(self, training_data, num_epochs, batch_size=10, eta=0.01):
gradient_w, gradient_b = self.gradient(x, y)
self.update(gradient_w, gradient_b, eta)
losses.append(loss)
- print('Epoch {:3d} / iter {:3d}, loss = {:.4f}'.
- format(epoch_id, iter_id, loss))
-
+ print('Epoch {:3d} / iter {:3d}, loss = {:.4f}'.format(
+ epoch_id, iter_id, loss))
+
return losses
+
+
def train():
    # Load the data
train_data, test_data = load_data()
@@ -123,6 +130,7 @@ def train():
plt.plot(plot_x, plot_y)
plt.show()
+
def plot_3D_neural_work_weight():
    # Load the data
training_data, test_data = load_data()
@@ -154,6 +162,7 @@ def plot_3D_neural_work_weight():
ax.plot_surface(w5, w9, losses, rstride=1, cstride=1, cmap='rainbow')
plt.show()
+
if __name__ == '__main__':
plot_3D_neural_work_weight()
- train()
\ No newline at end of file
+ train()
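For the mean-squared-error loss L = mean((xw + b - y)^2) the exact gradients are dL/dw = 2/N * sum((z - y) * x) and dL/db = 2/N * sum(z - y); the script's 1/N versions simply fold the constant 2 into the learning rate eta. A compact restatement on synthetic data (shapes mirror the 13-feature housing setup):

```python
# Gradient computation as in Network.gradient, on random toy data.
import numpy as np

x = np.random.randn(16, 13)
y = np.random.randn(16, 1)
w = np.random.randn(13, 1)
b = 0.0

z = x @ w + b                 # forward pass, shape [16, 1]
gradient_w = 1. / len(x) * np.sum((z - y) * x, axis=0)[:, np.newaxis]
gradient_b = 1. / len(x) * np.sum(z - y)
```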
diff --git a/junior_class/chapter-1-hands_on_deep_learning/code/1-4-build_neural_network_using_paddle.py b/junior_class/chapter-1-hands_on_deep_learning/code/1-4-build_neural_network_using_paddle.py
old mode 100644
new mode 100755
index 3ce35be68..15b22cd55
--- a/junior_class/chapter-1-hands_on_deep_learning/code/1-4-build_neural_network_using_paddle.py
+++ b/junior_class/chapter-1-hands_on_deep_learning/code/1-4-build_neural_network_using_paddle.py
@@ -21,13 +21,17 @@
import os
import random
+
def load_data():
    # Load the data from file
datafile = './work/housing.data'
data = np.fromfile(datafile, sep=' ', dtype=np.float32)
    # Each record has 14 fields: the first 13 are influencing factors, the 14th is the median house price
- feature_names = [ 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV' ]
+ feature_names = [
+ 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
+ 'PTRATIO', 'B', 'LSTAT', 'MEDV'
+ ]
feature_num = len(feature_names)
    # Reshape the raw data to [N, 14]
@@ -41,8 +45,9 @@ def load_data():
training_data = data[:offset]
    # Compute the maximum, minimum and mean of the training set
- maximums, minimums, avgs = training_data.max(axis=0), training_data.min(axis=0), training_data.sum(axis=0) / training_data.shape[0]
-
+ maximums, minimums, avgs = training_data.max(axis=0), training_data.min(
+ axis=0), training_data.sum(axis=0) / training_data.shape[0]
+
    # Record the normalization statistics so inputs can be normalized the same way at prediction time
global max_values
global min_values
@@ -66,10 +71,10 @@ class Regressor(paddle.nn.Layer):
def __init__(self):
        # Initialize some parameters in the parent class
super(Regressor, self).__init__()
-
+
        # Define one fully connected layer with input size 13 and output size 1
self.fc = Linear(in_features=13, out_features=1)
-
+
    # Forward pass of the network
def forward(self, inputs):
x = self.fc(inputs)
@@ -85,9 +90,10 @@ def train():
training_data, test_data = load_data()
    # Define the optimization algorithm: stochastic gradient descent (SGD)
    # with the learning rate set to 0.01
- opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
+ opt = paddle.optimizer.SGD(learning_rate=0.01,
+ parameters=model.parameters())
- EPOCH_NUM = 10 # 设置外层循环次数
+    EPOCH_NUM = 10  # number of epochs (outer loop iterations)
    BATCH_SIZE = 10 # batch size
# 定义外层循环
@@ -95,24 +101,28 @@ def train():
        # Shuffle the order of the training data before every epoch
np.random.shuffle(training_data)
        # Split the training data into batches of 10 examples each
- mini_batches = [training_data[k:k+BATCH_SIZE] for k in range(0, len(training_data), BATCH_SIZE)]
+ mini_batches = [
+ training_data[k:k + BATCH_SIZE]
+ for k in range(0, len(training_data), BATCH_SIZE)
+ ]
        # Inner loop
for iter_id, mini_batch in enumerate(mini_batches):
- x = np.array(mini_batch[:, :-1]) # 获得当前批次训练数据
- y = np.array(mini_batch[:, -1:]) # 获得当前批次训练标签(真实房价)
+            x = np.array(mini_batch[:, :-1])  # features of the current batch
+            y = np.array(mini_batch[:, -1:])  # labels of the current batch (true house prices)
            # Convert the numpy data to Paddle dygraph tensors
house_features = paddle.to_tensor(x)
prices = paddle.to_tensor(y)
-
+
            # Forward pass
predicts = model(house_features)
-
+
            # Compute the loss
loss = F.square_error_cost(predicts, label=prices)
avg_loss = paddle.mean(loss)
- if iter_id%20==0:
- print("epoch: {}, iter: {}, loss is: {}".format(epoch_id, iter_id, avg_loss.numpy()))
-
+ if iter_id % 20 == 0:
+ print("epoch: {}, iter: {}, loss is: {}".format(
+ epoch_id, iter_id, avg_loss.numpy()))
+
            # Backpropagation
avg_loss.backward()
            # Minimize the loss and update the parameters
@@ -132,10 +142,11 @@ def load_one_example():
idx = -10
one_data, label = test_data[idx, :-1], test_data[idx, -1]
    # Reshape this record to [1,13]
- one_data = one_data.reshape([1,-1])
+ one_data = one_data.reshape([1, -1])
return one_data, label
+
def validation():
model = Regressor()
    # The argument is the path of the saved model parameters
@@ -154,9 +165,10 @@ def validation():
    # Denormalize the label data
label = label * (max_values[-1] - min_values[-1]) + avg_values[-1]
- print("Inference result is {}, the corresponding label is {}".format(predict.numpy(), label))
+ print("Inference result is {}, the corresponding label is {}".format(
+ predict.numpy(), label))
if __name__ == '__main__':
train()
- validation()
\ No newline at end of file
+ validation()
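`validation` undoes the normalization with `x * (max - min) + avg`, which implies `load_data` scaled each column as `(x - avg) / (max - min)`. The round trip on one toy column, a sketch consistent with that inverse rather than the original code:

```python
# Normalization applied at training time and inverted at inference time.
import numpy as np

col = np.array([1.0, 2.0, 5.0])
mx, mn, avg = col.max(), col.min(), col.mean()

norm = (col - avg) / (mx - mn)     # what load_data applies per column
restored = norm * (mx - mn) + avg  # what validation() applies to labels
assert np.allclose(restored, col)
```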
diff --git a/junior_class/chapter-1-hands_on_deep_learning/code/README.md b/junior_class/chapter-1-hands_on_deep_learning/code/README.md
old mode 100644
new mode 100755
index a61c7127d..ac6ec8834
--- a/junior_class/chapter-1-hands_on_deep_learning/code/README.md
+++ b/junior_class/chapter-1-hands_on_deep_learning/code/README.md
@@ -10,17 +10,17 @@
## **Project description**
-```buildoutcfg
-|-work: 存放波士顿房价预测数据集
-|-1-2-build_neural_network_using_numpy.py: 使用Python语言和Numpy库构建神经网络模型的脚本
+```buildoutcfg
+|-work: stores the Boston housing price prediction dataset
+|-1-2-build_neural_network_using_numpy.py: script that builds a neural network model with Python and the Numpy library
|-1-4-build_neural_network_using_paddle.py: script that implements the housing price prediction model with PaddlePaddle
```
## **Dataset preparation**
-
+
Download the [housing.data](https://aistudio.baidu.com/aistudio/datasetdetail/58711) dataset into the work directory
-
+
## **Training**
1. Train the housing price prediction model with Python and the Numpy library: ''' python3 1-2-build_neural_network_using_numpy.py'''
-2. 使用飞桨实现房价预测模型训练''' python3 1-4-build_neural_network_using_paddle.py '''
\ No newline at end of file
+2. Train the housing price prediction model with PaddlePaddle: ''' python3 1-4-build_neural_network_using_paddle.py '''
diff --git a/junior_class/chapter-1-hands_on_deep_learning/code/README_en.md b/junior_class/chapter-1-hands_on_deep_learning/code/README_en.md
old mode 100644
new mode 100755
index 8f87e04e5..2678748af
--- a/junior_class/chapter-1-hands_on_deep_learning/code/README_en.md
+++ b/junior_class/chapter-1-hands_on_deep_learning/code/README_en.md
@@ -10,9 +10,9 @@
## **Structure**
-```buildoutcfg
-|-work: Store the Boston housing price prediction dataset
-|-1-2-build_neural_network_using_numpy.py: Script for building neural network model using Python language and Numpy library
+```buildoutcfg
+|-work: Store the Boston housing price prediction dataset
+|-1-2-build_neural_network_using_numpy.py: Script for building neural network model using Python language and Numpy library
|-1-4-build_neural_network_using_paddle.py: Script for realizing housing price prediction model using PaddlePaddle
```
@@ -23,4 +23,4 @@ Download [the dataset](https://aistudio.baidu.com/aistudio/datasetdetail/58711)
## **Train**
1. Train housing price prediction model using Python language and Numpy library''' python3 1-2-build_neural_network_using_numpy.py'''
-2. Train housing price prediction model using PaddlePaddle''' python3 1-4-build_neural_network_using_paddle.py '''
\ No newline at end of file
+2. Train housing price prediction model using PaddlePaddle''' python3 1-4-build_neural_network_using_paddle.py '''
diff --git a/junior_class/chapter-1-hands_on_deep_learning/code/data/housing.data b/junior_class/chapter-1-hands_on_deep_learning/code/data/housing.data
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-1-hands_on_deep_learning/notebook/1-1-overview_of_deep_learning.ipynb b/junior_class/chapter-1-hands_on_deep_learning/notebook/1-1-overview_of_deep_learning.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-1-hands_on_deep_learning/notebook/1-2-implementation_of_neural_network_using_numpy.ipynb b/junior_class/chapter-1-hands_on_deep_learning/notebook/1-2-implementation_of_neural_network_using_numpy.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-1-hands_on_deep_learning/notebook/1-3-starting_to_paddle.ipynb b/junior_class/chapter-1-hands_on_deep_learning/notebook/1-3-starting_to_paddle.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-1-hands_on_deep_learning/notebook/1-4-implementation_of_neural_network_using_paddle.ipynb b/junior_class/chapter-1-hands_on_deep_learning/notebook/1-4-implementation_of_neural_network_using_paddle.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-2-step_to_deep_learning/README.md b/junior_class/chapter-2-step_to_deep_learning/README.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-2-step_to_deep_learning/README_en.md b/junior_class/chapter-2-step_to_deep_learning/README_en.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-2-step_to_deep_learning/code/README.md b/junior_class/chapter-2-step_to_deep_learning/code/README.md
old mode 100644
new mode 100755
index c1f71ec0e..05f959d49
--- a/junior_class/chapter-2-step_to_deep_learning/code/README.md
+++ b/junior_class/chapter-2-step_to_deep_learning/code/README.md
@@ -35,4 +35,4 @@
直接使用 train.py 脚本启动训练。
```
python3 train.py
-```
\ No newline at end of file
+```
diff --git a/junior_class/chapter-2-step_to_deep_learning/code/README_en.md b/junior_class/chapter-2-step_to_deep_learning/code/README_en.md
old mode 100644
new mode 100755
index 754309771..6656b8a87
--- a/junior_class/chapter-2-step_to_deep_learning/code/README_en.md
+++ b/junior_class/chapter-2-step_to_deep_learning/code/README_en.md
@@ -35,4 +35,4 @@
Start the training directly using the train.py script.
```
python3 train.py
-```
\ No newline at end of file
+```
diff --git a/junior_class/chapter-2-step_to_deep_learning/code/datasets/example_0.png b/junior_class/chapter-2-step_to_deep_learning/code/datasets/example_0.png
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-2-step_to_deep_learning/code/datasets/generate.py b/junior_class/chapter-2-step_to_deep_learning/code/datasets/generate.py
old mode 100644
new mode 100755
index c72727168..2d9cc2d78
--- a/junior_class/chapter-2-step_to_deep_learning/code/datasets/generate.py
+++ b/junior_class/chapter-2-step_to_deep_learning/code/datasets/generate.py
@@ -99,7 +99,8 @@ def __init__(self, mode):
        # Get the test dataset
imgs, labels = test_set[0], test_set[1]
else:
- raise Exception("mode can only be one of ['train', 'valid', 'eval']")
+ raise Exception(
+ "mode can only be one of ['train', 'valid', 'eval']")
    # Validate the data
assert len(imgs) == len(labels), \
diff --git a/junior_class/chapter-2-step_to_deep_learning/code/datasets/mnist.json.gz b/junior_class/chapter-2-step_to_deep_learning/code/datasets/mnist.json.gz
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-2-step_to_deep_learning/code/nets/__init__.py b/junior_class/chapter-2-step_to_deep_learning/code/nets/__init__.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-2-step_to_deep_learning/code/nets/fnn.py b/junior_class/chapter-2-step_to_deep_learning/code/nets/fnn.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-2-step_to_deep_learning/code/nets/logistic.py b/junior_class/chapter-2-step_to_deep_learning/code/nets/logistic.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-2-step_to_deep_learning/code/train.py b/junior_class/chapter-2-step_to_deep_learning/code/train.py
old mode 100644
new mode 100755
index 7dc341578..6018007c8
--- a/junior_class/chapter-2-step_to_deep_learning/code/train.py
+++ b/junior_class/chapter-2-step_to_deep_learning/code/train.py
@@ -42,7 +42,8 @@ def update_summary(self, **kwargs):
pass
else:
for name in kwargs:
- self.summary_writer.add_scalar(tag=name, step=self.global_step, value=kwargs[name])
+ self.summary_writer.add_scalar(
+ tag=name, step=self.global_step, value=kwargs[name])
def save(self):
paddle.save(self.model.state_dict(), self.model_path)
@@ -84,7 +85,8 @@ def train_epoch(self, datasets, epoch):
self.global_step += 1
        # Print the current loss every 100 batches of training data
if batch_id % 100 == 0:
- print("epoch_id: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, loss.numpy()))
+ print("epoch_id: {}, batch_id: {}, loss is: {}".format(
+ epoch, batch_id, loss.numpy()))
def train(self, train_datasets, val_datasets, epochs):
for i in range(epochs):
@@ -92,7 +94,8 @@ def train(self, train_datasets, val_datasets, epochs):
train_acc = self.val_epoch(train_datasets)
val_acc = self.val_epoch(val_datasets)
self.update_summary(train_acc=train_acc, val_acc=val_acc)
- print("epoch_id: {}, train acc is: {}, val acc is {}".format(i, train_acc, val_acc))
+ print("epoch_id: {}, train acc is: {}, val acc is {}".format(
+ i, train_acc, val_acc))
self.save()
@@ -102,10 +105,8 @@ def main():
model_path = './mnist.pdparams'
train_dataset = MnistDataset(mode='train')
- train_loader = paddle.io.DataLoader(train_dataset,
- batch_size=32,
- shuffle=True,
- num_workers=4)
+ train_loader = paddle.io.DataLoader(
+ train_dataset, batch_size=32, shuffle=True, num_workers=4)
val_dataset = MnistDataset(mode='val')
val_loader = paddle.io.DataLoader(val_dataset, batch_size=128)
@@ -113,17 +114,15 @@ def main():
# model = fnn.MNIST()
model = logistic.MNIST()
# opt = paddle.optimizer.SGD(learning_rate=lr, parameters=model.parameters())
- opt = paddle.optimizer.SGD(learning_rate=lr,
- weight_decay=paddle.regularizer.L2Decay(coeff=5e-4),
- parameters=model.parameters())
+ opt = paddle.optimizer.SGD(
+ learning_rate=lr,
+ weight_decay=paddle.regularizer.L2Decay(coeff=5e-4),
+ parameters=model.parameters())
- trainer = Trainer(
- model_path=model_path,
- model=model,
- optimizer=opt
- )
+ trainer = Trainer(model_path=model_path, model=model, optimizer=opt)
- trainer.train(train_datasets=train_loader, val_datasets=val_loader, epochs=epochs)
+ trainer.train(
+ train_datasets=train_loader, val_datasets=val_loader, epochs=epochs)
if __name__ == '__main__':
diff --git a/junior_class/chapter-2-step_to_deep_learning/notebook/mnist.ipynb b/junior_class/chapter-2-step_to_deep_learning/notebook/mnist.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/README.md b/junior_class/chapter-3-Computer_Vision/README.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/README_en.md b/junior_class/chapter-3-Computer_Vision/README_en.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/.idea/.gitignore b/junior_class/chapter-3-Computer_Vision/code/.idea/.gitignore
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/.idea/code.iml b/junior_class/chapter-3-Computer_Vision/code/.idea/code.iml
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/.idea/inspectionProfiles/profiles_settings.xml b/junior_class/chapter-3-Computer_Vision/code/.idea/inspectionProfiles/profiles_settings.xml
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/.idea/misc.xml b/junior_class/chapter-3-Computer_Vision/code/.idea/misc.xml
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/.idea/modules.xml b/junior_class/chapter-3-Computer_Vision/code/.idea/modules.xml
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/.idea/vcs.xml b/junior_class/chapter-3-Computer_Vision/code/.idea/vcs.xml
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/000000098520.jpg b/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/000000098520.jpg
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/000000355610.jpg b/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/000000355610.jpg
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/Average_filtering.py b/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/Average_filtering.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/BatchNorm1D.py b/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/BatchNorm1D.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/BatchNorm2D.py b/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/BatchNorm2D.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/Black_and_white_boundary_detection.py b/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/Black_and_white_boundary_detection.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/Dropout.py b/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/Dropout.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/Edge_detection.py b/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/Edge_detection.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/SimpleNet.py b/junior_class/chapter-3-Computer_Vision/code/CNN_Basis/SimpleNet.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/Paddle_highAPI.py b/junior_class/chapter-3-Computer_Vision/code/Paddle_highAPI.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/README.md b/junior_class/chapter-3-Computer_Vision/code/README.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/README_en.md b/junior_class/chapter-3-Computer_Vision/code/README_en.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/data/labels.csv b/junior_class/chapter-3-Computer_Vision/code/data/labels.csv
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/datasets/__init__.py b/junior_class/chapter-3-Computer_Vision/code/datasets/__init__.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/datasets/dataset.py b/junior_class/chapter-3-Computer_Vision/code/datasets/dataset.py
old mode 100644
new mode 100755
index 3b5016a00..57dfab428
--- a/junior_class/chapter-3-Computer_Vision/code/datasets/dataset.py
+++ b/junior_class/chapter-3-Computer_Vision/code/datasets/dataset.py
@@ -64,8 +64,8 @@ def reader():
                # When the length of the data list reaches batch_size,
                # treat these examples as one mini-batch and yield it as one output of the data generator
imgs_array = np.array(batch_imgs).astype('float32')
- labels_array = np.array(batch_labels).astype(
- 'float32').reshape(-1, 1)
+ labels_array = np.array(batch_labels).astype('float32').reshape(
+ -1, 1)
yield imgs_array, labels_array
batch_imgs = []
batch_labels = []
@@ -111,8 +111,8 @@ def reader():
                # When the length of the data list reaches batch_size,
                # treat these examples as one mini-batch and yield it as one output of the data generator
imgs_array = np.array(batch_imgs).astype('float32')
- labels_array = np.array(batch_labels).astype(
- 'float32').reshape(-1, 1)
+ labels_array = np.array(batch_labels).astype('float32').reshape(
+ -1, 1)
yield imgs_array, labels_array
batch_imgs = []
batch_labels = []
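The two hunks above only re-wrap the `labels_array` line; the reader still accumulates samples and yields an `(imgs, labels)` pair once `batch_size` items are collected, then clears its buffers. A self-contained sketch of that batching pattern, with hypothetical toy data standing in for the repo's image loader:

import numpy as np

def make_reader(samples, batch_size):
    # samples: an iterable of (img, label) pairs
    def reader():
        batch_imgs, batch_labels = [], []
        for img, label in samples:
            batch_imgs.append(img)
            batch_labels.append(label)
            if len(batch_imgs) == batch_size:
                imgs_array = np.array(batch_imgs).astype('float32')
                labels_array = np.array(batch_labels).astype('float32').reshape(
                    -1, 1)
                yield imgs_array, labels_array
                batch_imgs, batch_labels = [], []
    return reader

# usage: ten fake 1x8x8 images with binary labels; the trailing partial
# batch of 2 is simply not yielded in this sketch
fake = [(np.zeros((1, 8, 8)), i % 2) for i in range(10)]
for imgs, labels in make_reader(fake, batch_size=4)():
    print(imgs.shape, labels.shape)  # (4, 1, 8, 8) (4, 1)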
diff --git a/junior_class/chapter-3-Computer_Vision/code/nets/AlexNet.py b/junior_class/chapter-3-Computer_Vision/code/nets/AlexNet.py
old mode 100644
new mode 100755
index 70d3c9496..02733873c
--- a/junior_class/chapter-3-Computer_Vision/code/nets/AlexNet.py
+++ b/junior_class/chapter-3-Computer_Vision/code/nets/AlexNet.py
@@ -25,11 +25,7 @@ def __init__(self, num_classes=1):
# AlexNet与LeNet一样也会同时使用卷积和池化层提取图像特征
# 与LeNet不同的是激活函数换成了‘relu’
self.conv1 = Conv2D(
- in_channels=3,
- out_channels=96,
- kernel_size=11,
- stride=4,
- padding=5)
+ in_channels=3, out_channels=96, kernel_size=11, stride=4, padding=5)
self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
self.conv2 = Conv2D(
in_channels=96,
diff --git a/junior_class/chapter-3-Computer_Vision/code/nets/GoogLeNet.py b/junior_class/chapter-3-Computer_Vision/code/nets/GoogLeNet.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/nets/LeNet.py b/junior_class/chapter-3-Computer_Vision/code/nets/LeNet.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/nets/LeNet_PALM.py b/junior_class/chapter-3-Computer_Vision/code/nets/LeNet_PALM.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/nets/ResNet.py b/junior_class/chapter-3-Computer_Vision/code/nets/ResNet.py
old mode 100644
new mode 100755
index a7d97dc81..2bf1f0985
--- a/junior_class/chapter-3-Computer_Vision/code/nets/ResNet.py
+++ b/junior_class/chapter-3-Computer_Vision/code/nets/ResNet.py
@@ -146,11 +146,7 @@ def __init__(self, layers=50, class_dim=1):
# ResNet的第一个模块,包含1个7x7卷积,后面跟着1个最大池化层
self.conv = ConvBNLayer(
- num_channels=3,
- num_filters=64,
- filter_size=7,
- stride=2,
- act='relu')
+ num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
# ResNet的第二到第五个模块c2、c3、c4、c5
diff --git a/junior_class/chapter-3-Computer_Vision/code/nets/__init__.py b/junior_class/chapter-3-Computer_Vision/code/nets/__init__.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/nets/vgg.py b/junior_class/chapter-3-Computer_Vision/code/nets/vgg.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/train_MNIST.py b/junior_class/chapter-3-Computer_Vision/code/train_MNIST.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/code/train_PALM.py b/junior_class/chapter-3-Computer_Vision/code/train_PALM.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/notebook/3-1-CV-CNN_Basis.ipynb b/junior_class/chapter-3-Computer_Vision/notebook/3-1-CV-CNN_Basis.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-3-Computer_Vision/notebook/3-2-CV-Image_Classification.ipynb b/junior_class/chapter-3-Computer_Vision/notebook/3-2-CV-Image_Classification.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/.idea/.gitignore b/junior_class/chapter-4-Object_Detection/.idea/.gitignore
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/.idea/chapter-4-Object_Detection.iml b/junior_class/chapter-4-Object_Detection/.idea/chapter-4-Object_Detection.iml
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/.idea/inspectionProfiles/profiles_settings.xml b/junior_class/chapter-4-Object_Detection/.idea/inspectionProfiles/profiles_settings.xml
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/.idea/misc.xml b/junior_class/chapter-4-Object_Detection/.idea/misc.xml
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/.idea/modules.xml b/junior_class/chapter-4-Object_Detection/.idea/modules.xml
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/.idea/vcs.xml b/junior_class/chapter-4-Object_Detection/.idea/vcs.xml
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/README.md b/junior_class/chapter-4-Object_Detection/README.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/README_en.md b/junior_class/chapter-4-Object_Detection/README_en.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/Detection_basis/000000086956.jpg b/junior_class/chapter-4-Object_Detection/code/Detection_basis/000000086956.jpg
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/Detection_basis/__init__.py b/junior_class/chapter-4-Object_Detection/code/Detection_basis/__init__.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/Detection_basis/box_iou_xywh.py b/junior_class/chapter-4-Object_Detection/code/Detection_basis/box_iou_xywh.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/Detection_basis/box_iou_xyxy.py b/junior_class/chapter-4-Object_Detection/code/Detection_basis/box_iou_xyxy.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/Detection_basis/draw_anchor_box.py b/junior_class/chapter-4-Object_Detection/code/Detection_basis/draw_anchor_box.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/Detection_basis/draw_rectangle.py b/junior_class/chapter-4-Object_Detection/code/Detection_basis/draw_rectangle.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/Detection_basis/mAP.py b/junior_class/chapter-4-Object_Detection/code/Detection_basis/mAP.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/Detection_basis/multiclass_nms.py b/junior_class/chapter-4-Object_Detection/code/Detection_basis/multiclass_nms.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/Detection_basis/nms.py b/junior_class/chapter-4-Object_Detection/code/Detection_basis/nms.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/README.md b/junior_class/chapter-4-Object_Detection/code/README.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/README_en.md b/junior_class/chapter-4-Object_Detection/code/README_en.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/dataset/__init__.py b/junior_class/chapter-4-Object_Detection/code/dataset/__init__.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/dataset/dataset.py b/junior_class/chapter-4-Object_Detection/code/dataset/dataset.py
old mode 100644
new mode 100755
index de2c2e9d4..75bcd4596
--- a/junior_class/chapter-4-Object_Detection/code/dataset/dataset.py
+++ b/junior_class/chapter-4-Object_Detection/code/dataset/dataset.py
@@ -80,8 +80,8 @@ def get_annotations(cname2cid, datadir):
x2 = min(im_w - 1, x2)
y2 = min(im_h - 1, y2)
# 这里使用xywh格式来表示目标物体真实框
- gt_bbox[i] = [(x1 + x2) / 2.0, (y1 + y2) / 2.0, x2 - x1 + 1., y2 -
- y1 + 1.]
+ gt_bbox[i] = [(x1 + x2) / 2.0, (y1 + y2) / 2.0, x2 - x1 + 1.,
+ y2 - y1 + 1.]
is_crowd[i] = 0
difficult[i] = _difficult
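For reference, the rewrapped expression converts a corner-format box `(x1, y1, x2, y2)` into the center-format `(cx, cy, w, h)` the reader stores, with the `+ 1.` reflecting inclusive pixel coordinates. A quick worked check:

def xyxy_to_xywh(x1, y1, x2, y2):
    # center point plus inclusive width/height
    return (x1 + x2) / 2.0, (y1 + y2) / 2.0, x2 - x1 + 1., y2 - y1 + 1.

print(xyxy_to_xywh(10, 20, 19, 39))  # (14.5, 29.5, 10.0, 20.0)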
diff --git a/junior_class/chapter-4-Object_Detection/code/dataset/transform.py b/junior_class/chapter-4-Object_Detection/code/dataset/transform.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/net/YOLOv3.py b/junior_class/chapter-4-Object_Detection/code/net/YOLOv3.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/net/__init__.py b/junior_class/chapter-4-Object_Detection/code/net/__init__.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/predict.py b/junior_class/chapter-4-Object_Detection/code/predict.py
old mode 100644
new mode 100755
index 19388e71e..54ca2f8a7
--- a/junior_class/chapter-4-Object_Detection/code/predict.py
+++ b/junior_class/chapter-4-Object_Detection/code/predict.py
@@ -24,8 +24,7 @@
# 将 list形式的batch数据 转化成多个array构成的tuple
def make_test_array(batch_data):
img_name_array = np.array([item[0] for item in batch_data])
- img_data_array = np.array(
- [item[1] for item in batch_data], dtype='float32')
+ img_data_array = np.array([item[1] for item in batch_data], dtype='float32')
img_scale_array = np.array([item[2] for item in batch_data], dtype='int32')
return img_name_array, img_data_array, img_scale_array
@@ -68,8 +67,7 @@ def reader():
ANCHORS = [
- 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373,
- 326
+ 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326
]
ANCHOR_MASKS = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
VALID_THRESH = 0.01
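The flat `ANCHORS` list is nine (width, height) pairs, and in the standard YOLOv3 setup `ANCHOR_MASKS` hands the three largest anchors (indices 6-8) to the coarsest feature map and the three smallest (0-2) to the finest. A small sketch that pairs them up:

ANCHORS = [
    10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326
]
ANCHOR_MASKS = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]

pairs = list(zip(ANCHORS[::2], ANCHORS[1::2]))  # nine (w, h) anchor boxes
for level, mask in enumerate(ANCHOR_MASKS):
    print("feature map", level, "->", [pairs[i] for i in mask])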
diff --git a/junior_class/chapter-4-Object_Detection/code/predict_one_pic.py b/junior_class/chapter-4-Object_Detection/code/predict_one_pic.py
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/code/train.py b/junior_class/chapter-4-Object_Detection/code/train.py
old mode 100644
new mode 100755
index 2c4a9b330..bb8d5cf20
--- a/junior_class/chapter-4-Object_Detection/code/train.py
+++ b/junior_class/chapter-4-Object_Detection/code/train.py
@@ -19,8 +19,7 @@
from net import YOLOv3
ANCHORS = [
- 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373,
- 326
+ 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326
]
ANCHOR_MASKS = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
@@ -32,8 +31,7 @@
def get_lr(base_lr=0.0001, lr_decay=0.1):
bd = [10000, 20000]
lr = [base_lr, base_lr * lr_decay, base_lr * lr_decay * lr_decay]
- learning_rate = paddle.optimizer.lr.PiecewiseDecay(
- boundaries=bd, values=lr)
+ learning_rate = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, values=lr)
return learning_rate
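`PiecewiseDecay(boundaries=bd, values=lr)` returns `values[i]` for whichever segment the global step falls into: with `bd = [10000, 20000]`, the rate is `base_lr` while `step < 10000`, `base_lr * lr_decay` for `10000 <= step < 20000`, and `base_lr * lr_decay**2` afterwards. A framework-free sketch of the same lookup (illustrative only):

import bisect

def piecewise_lr(step, boundaries, values):
    # values needs len(boundaries) + 1 entries, one per segment
    return values[bisect.bisect_right(boundaries, step)]

bd = [10000, 20000]
lr = [1e-4, 1e-5, 1e-6]
for step in (0, 9999, 10000, 25000):
    print(step, piecewise_lr(step, bd, lr))  # 1e-4, 1e-4, 1e-5, 1e-6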
diff --git a/junior_class/chapter-4-Object_Detection/notebook/.ipynb_checkpoints/4-1-Object_Detection-checkpoint.ipynb b/junior_class/chapter-4-Object_Detection/notebook/.ipynb_checkpoints/4-1-Object_Detection-checkpoint.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/notebook/.ipynb_checkpoints/4-2-YOLOv3-checkpoint.ipynb b/junior_class/chapter-4-Object_Detection/notebook/.ipynb_checkpoints/4-2-YOLOv3-checkpoint.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/notebook/4-1-Object_Detection.ipynb b/junior_class/chapter-4-Object_Detection/notebook/4-1-Object_Detection.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-4-Object_Detection/notebook/4-2-YOLOv3.ipynb b/junior_class/chapter-4-Object_Detection/notebook/4-2-YOLOv3.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-5-NLP/README.md b/junior_class/chapter-5-NLP/README.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-5-NLP/README_en.md b/junior_class/chapter-5-NLP/README_en.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-5-NLP/code/README.md b/junior_class/chapter-5-NLP/code/README.md
old mode 100644
new mode 100755
index 8099dcc81..01bc5bf6a
--- a/junior_class/chapter-5-NLP/code/README.md
+++ b/junior_class/chapter-5-NLP/code/README.md
@@ -9,7 +9,7 @@
其中paddle请安装2.0版本,具体安装方式请参考
[飞桨官网->快速安装](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/2.0/install/pip/windows-pip.html) 。
-
+
## 项目介绍
```buildoutcfg
|-data: 存放下载后的数据
@@ -22,4 +22,4 @@
```
## 启动训练
->python train.py
\ No newline at end of file
+>python train.py
diff --git a/junior_class/chapter-5-NLP/code/README_en.md b/junior_class/chapter-5-NLP/code/README_en.md
old mode 100644
new mode 100755
index 09776daa3..81b2426e2
--- a/junior_class/chapter-5-NLP/code/README_en.md
+++ b/junior_class/chapter-5-NLP/code/README_en.md
@@ -9,17 +9,17 @@
**note**: please install paddle with version 2.0. if you have not installed it, please refer to
[ the quick install](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/2.0/install/pip/windows-pip.html) 。
-
+
## Structure
```buildoutcfg
|-data: the dir of saving dataset
-|-model:
+|-model:
|-word2vec: the implement of skip gram
-|-utils:
+|-utils:
|-data_processor.py: the operations related data processing
|-utils: some tool methods
|-train.py: the script of training model
```
## Training Model
->python train.py
\ No newline at end of file
+>python train.py
diff --git a/junior_class/chapter-5-NLP/code/model/word2vec.py b/junior_class/chapter-5-NLP/code/model/word2vec.py
old mode 100644
new mode 100755
index 40cfd9e86..0fd3f4be1
--- a/junior_class/chapter-5-NLP/code/model/word2vec.py
+++ b/junior_class/chapter-5-NLP/code/model/word2vec.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
from paddle.nn import Embedding
import paddle.nn.functional as F
@@ -46,7 +45,8 @@ def __init__(self, vocab_size, embedding_size, init_scale=0.1):
num_embeddings=self.vocab_size,
embedding_dim=self.embedding_size,
weight_attr=paddle.ParamAttr(
- initializer=paddle.nn.initializer.Uniform(low=-init_scale, high=init_scale)))
+ initializer=paddle.nn.initializer.Uniform(
+ low=-init_scale, high=init_scale)))
# 定义网络的前向计算逻辑
# center_words是一个tensor(mini-batch),表示中心词
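The reflowed `weight_attr` still builds an embedding table whose rows are drawn from U(-init_scale, init_scale). A minimal sketch of the same construction with toy sizes (assuming Paddle 2.x APIs):

import paddle

init_scale = 0.1
emb = paddle.nn.Embedding(
    num_embeddings=50,  # vocab size
    embedding_dim=8,
    weight_attr=paddle.ParamAttr(
        initializer=paddle.nn.initializer.Uniform(
            low=-init_scale, high=init_scale)))
print(emb.weight.shape)  # [50, 8]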
diff --git a/junior_class/chapter-5-NLP/code/train.py b/junior_class/chapter-5-NLP/code/train.py
old mode 100644
new mode 100755
index 66bacd67a..b4d546169
--- a/junior_class/chapter-5-NLP/code/train.py
+++ b/junior_class/chapter-5-NLP/code/train.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import paddle
from model import word2vec
@@ -28,7 +27,8 @@ def get_dataset(data_path, corpus_rate=1.0, subsampling=True):
corpus = data_processor.data_preprocess(corpus)
corpus = corpus[:int(len(corpus) * corpus_rate)]
# 根据语料构造字典,统计每个词的频率,并根据频率将每个词转换为一个整数id
- word2id_freq, word2id_dict, id2word_dict = data_processor.build_dict(corpus)
+ word2id_freq, word2id_dict, id2word_dict = data_processor.build_dict(
+ corpus)
# 将语料转换为ID序列
corpus = data_processor.convert_corpus_to_id(corpus, word2id_dict)
# 使用二次采样算法处理语料,强化训练效果
@@ -39,6 +39,7 @@ def get_dataset(data_path, corpus_rate=1.0, subsampling=True):
return dataset, word2id_dict, id2word_dict
+
def train(model, data_loader):
# 开始训练,定义一些训练过程中需要使用的超参数
batch_size = 128
@@ -56,7 +57,8 @@ def train(model, data_loader):
model.train()
# 构造训练这个网络的优化器
- adam = paddle.optimizer.Adam(learning_rate=learning_rate, parameters=model.parameters())
+ adam = paddle.optimizer.Adam(
+ learning_rate=learning_rate, parameters=model.parameters())
# 使用build_batch函数,以mini-batch为单位,遍历训练数据,并训练网络
for center_words, target_words, label in data_loader:
@@ -82,9 +84,12 @@ def train(model, data_loader):
# 每隔10000步,打印一次模型对以下查询词的相似词,这里我们使用词和词之间的向量点积作为衡量相似度的方法,只打印了5个最相似的词
if step % 10000 == 0:
- utils.get_similar_tokens('movie', 5, model.embedding.weight, word2id_dict, id2word_dict)
- utils.get_similar_tokens('one', 5, model.embedding.weight, word2id_dict, id2word_dict)
- utils.get_similar_tokens('chip', 5, model.embedding.weight, word2id_dict, id2word_dict)
+ utils.get_similar_tokens('movie', 5, model.embedding.weight,
+ word2id_dict, id2word_dict)
+ utils.get_similar_tokens('one', 5, model.embedding.weight,
+ word2id_dict, id2word_dict)
+ utils.get_similar_tokens('chip', 5, model.embedding.weight,
+ word2id_dict, id2word_dict)
if __name__ == '__main__':
@@ -99,12 +104,14 @@ def train(model, data_loader):
dataset_save_path = "./data/text8.txt"
if not os.path.exists(dataset_save_path):
dataset_download_path = "https://dataset.bj.bcebos.com/word2vec/text8.txt"
- data_processor.download(save_path=dataset_save_path, corpus_url=dataset_download_path)
+ data_processor.download(
+ save_path=dataset_save_path, corpus_url=dataset_download_path)
# 获得数据集
- dataset, word2id_dict, id2word_dict = get_dataset(dataset_save_path, corpus_rate=0.2)
+ dataset, word2id_dict, id2word_dict = get_dataset(
+ dataset_save_path, corpus_rate=0.2)
data_loader = data_processor.build_batch(dataset, batch_size, epoch_num)
# 初始化word2vec实例
vocab_size = len(word2id_dict.keys())
skip_gram = word2vec.SkipGram(vocab_size, embedding_size)
# 开始模型训练
- train(skip_gram, data_loader)
\ No newline at end of file
+ train(skip_gram, data_loader)
diff --git a/junior_class/chapter-5-NLP/code/utils/data_processor.py b/junior_class/chapter-5-NLP/code/utils/data_processor.py
old mode 100644
new mode 100755
index 0d30cec68..a3384cd61
--- a/junior_class/chapter-5-NLP/code/utils/data_processor.py
+++ b/junior_class/chapter-5-NLP/code/utils/data_processor.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import math
import random
@@ -21,7 +20,8 @@
# 下载语料用来训练word2vec
-def download(save_path, corpus_url="https://dataset.bj.bcebos.com/word2vec/text8.txt"):
+def download(save_path,
+ corpus_url="https://dataset.bj.bcebos.com/word2vec/text8.txt"):
# 可以从百度云服务器下载一些开源数据集(dataset.bj.bcebos.com),使用python的requests包下载数据集到本地
web_request = requests.get(corpus_url)
corpus = web_request.content
@@ -63,7 +63,8 @@ def build_dict(corpus):
# 将这个词典中的词,按照出现次数排序,出现次数越高,排序越靠前
# 一般来说,出现频率高的高频词往往是:I,the,you这种代词,而出现频率低的词,往往是一些名词,如:nlp
- word_freq_dict = sorted(word_freq_dict.items(), key=lambda x: x[1], reverse=True)
+ word_freq_dict = sorted(
+ word_freq_dict.items(), key=lambda x: x[1], reverse=True)
# 构造3个不同的词典,分别存储,
# 每个词到id的映射关系:word2id_dict
@@ -95,8 +96,8 @@ def subsampling(corpus, word2id_freq):
# 这个discard函数决定了一个词会不会被替换,这个函数是具有随机性的,每次调用结果不同
# 如果一个词的频率很大,那么它被遗弃的概率就很大
def discard(word_id):
- return random.uniform(0, 1) < 1 - math.sqrt(
- 1e-4 / word2id_freq[word_id] * len(corpus))
+ return random.uniform(
+ 0, 1) < 1 - math.sqrt(1e-4 / word2id_freq[word_id] * len(corpus))
corpus = [word for word in corpus if not discard(word)]
return corpus
@@ -120,9 +121,13 @@ def build_data(corpus, word2id_dict, max_window_size=3, negative_sample_num=4):
# 以当前中心词为中心,左右两侧在window_size内的词都可以看成是正样本
positive_word_range = (
- max(0, center_word_idx - window_size), min(len(corpus) - 1, center_word_idx + window_size))
- positive_word_candidates = [corpus[idx] for idx in range(positive_word_range[0], positive_word_range[1] + 1) if
- idx != center_word_idx]
+ max(0, center_word_idx - window_size),
+ min(len(corpus) - 1, center_word_idx + window_size))
+ positive_word_candidates = [
+ corpus[idx]
+ for idx in range(positive_word_range[0], positive_word_range[1] +
+ 1) if idx != center_word_idx
+ ]
# 对于每个正样本来说,随机采样negative_sample_num个负样本,用于训练
for positive_word in positive_word_candidates:
@@ -169,11 +174,14 @@ def build_batch(dataset, batch_size, epoch_num):
# 并使用python的迭代器机制,将数据yield出来
# 使用迭代器的好处是可以节省内存
if len(center_word_batch) == batch_size:
- yield np.array(center_word_batch).astype("int64"), np.array(target_word_batch).astype("int64"), np.array(label_batch).astype("float32")
+ yield np.array(center_word_batch).astype("int64"), np.array(
+ target_word_batch).astype("int64"), np.array(
+ label_batch).astype("float32")
center_word_batch = []
target_word_batch = []
label_batch = []
if len(center_word_batch) > 0:
- yield np.array(center_word_batch).astype("int64"), np.array(target_word_batch).astype("int64"), np.array(
- label_batch).astype("float32")
+ yield np.array(center_word_batch).astype("int64"), np.array(
+ target_word_batch).astype("int64"), np.array(label_batch).astype(
+ "float32")
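The reformatted `discard` helper implements word2vec-style subsampling: a word whose corpus count is `f` is kept with probability `sqrt(t / (f / len(corpus)))` for `t = 1e-4`, so frequent function words are thinned hard while rare words are almost always kept. A standalone sketch with hypothetical counts:

import math

def discard_prob(count, corpus_len, t=1e-4):
    rel_freq = count / corpus_len  # relative frequency of the word
    return max(0.0, 1 - math.sqrt(t / rel_freq))

corpus_len = 1_000_000
for count in (100, 10_000, 100_000):  # rare word -> very frequent word
    print(count, round(discard_prob(count, corpus_len), 4))  # 0.0 0.9 0.9684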
diff --git a/junior_class/chapter-5-NLP/code/utils/utils.py b/junior_class/chapter-5-NLP/code/utils/utils.py
old mode 100644
new mode 100755
index d3a971e25..4aa51d8f9
--- a/junior_class/chapter-5-NLP/code/utils/utils.py
+++ b/junior_class/chapter-5-NLP/code/utils/utils.py
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import numpy as np
+
# 定义一个使用word-embedding查询同义词的函数
# 这个函数query_token是要查询的词,k表示要返回多少个最相似的词,embed是我们学习到的word-embedding参数
# 我们通过计算不同词之间的cosine距离,来衡量词和词的相似度
@@ -28,4 +28,5 @@ def get_similar_tokens(query_token, k, embed, word2id_dict, id2word_dict):
indices = np.argpartition(flat, -k)[-k:]
indices = indices[np.argsort(-flat[indices])]
for i in indices:
- print('for word %s, the similar word is %s' % (query_token, str(id2word_dict[i])))
+ print('for word %s, the similar word is %s' %
+ (query_token, str(id2word_dict[i])))
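`get_similar_tokens` scores every vocabulary word by dot product with the query embedding and uses `np.argpartition` to pull out the top-k in O(n) before sorting only those k. The same idiom in isolation (toy vectors, hypothetical names):

import numpy as np

def top_k_similar(query_vec, embed, k):
    scores = embed @ query_vec                 # scores[i] = <query, embed[i]>
    idx = np.argpartition(scores, -k)[-k:]     # unordered top-k, O(n)
    return idx[np.argsort(-scores[idx])]       # sort just those k, best first

rng = np.random.default_rng(0)
embed = rng.normal(size=(1000, 16))
print(top_k_similar(embed[42], embed, k=5))    # row 42 ranks itself first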
diff --git a/junior_class/chapter-5-NLP/notebook/5-1-overview_of_natural_language_processing.ipynb b/junior_class/chapter-5-NLP/notebook/5-1-overview_of_natural_language_processing.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-5-NLP/notebook/5-2-word_embedding.ipynb b/junior_class/chapter-5-NLP/notebook/5-2-word_embedding.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-6-sentiment_classification/README.md b/junior_class/chapter-6-sentiment_classification/README.md
old mode 100644
new mode 100755
index 51edc2736..e75b4b465
--- a/junior_class/chapter-6-sentiment_classification/README.md
+++ b/junior_class/chapter-6-sentiment_classification/README.md
@@ -7,4 +7,3 @@
本章内容同时为大家提供了notebook和code文件,读者可按需选择学习:
- notebook 给大家提供本章的学习教程,有完整的文字说明内容;为了有更好的阅读方式大家也可以访问 AIStudio 平台上的 [notebook文档](https://aistudio.baidu.com/aistudio/education/group/info/1297/content) 。
- code 给大家提供了本章内容的完整学习代码,具体使用教程参考 code 部分的 [README](./code/README.md)。
-
diff --git a/junior_class/chapter-6-sentiment_classification/README_en.md b/junior_class/chapter-6-sentiment_classification/README_en.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-6-sentiment_classification/code/README.md b/junior_class/chapter-6-sentiment_classification/code/README.md
old mode 100644
new mode 100755
index 7e8065210..a5df639dc
--- a/junior_class/chapter-6-sentiment_classification/code/README.md
+++ b/junior_class/chapter-6-sentiment_classification/code/README.md
@@ -27,4 +27,4 @@
>python train.py
## 启动评估
->python evaluate.py
\ No newline at end of file
+>python evaluate.py
diff --git a/junior_class/chapter-6-sentiment_classification/code/README_en.md b/junior_class/chapter-6-sentiment_classification/code/README_en.md
old mode 100644
new mode 100755
index 03cca0fa9..ec249fcc1
--- a/junior_class/chapter-6-sentiment_classification/code/README_en.md
+++ b/junior_class/chapter-6-sentiment_classification/code/README_en.md
@@ -15,9 +15,9 @@
## Structure
```buildoutcfg
|-data: The dir of saving dataset or trained model
-|-model:
+|-model:
|-sentiment_classifier: The implement of sentiment classification
-|-utils:
+|-utils:
|-data_processor.py: the operations related data processing
|-train.py: the script of training model
|-evaluate.py: the script of evaluating model
@@ -27,4 +27,4 @@
>python train.py
## Evaluating Model
->python evaluate.py
\ No newline at end of file
+>python evaluate.py
diff --git a/junior_class/chapter-6-sentiment_classification/code/evaluate.py b/junior_class/chapter-6-sentiment_classification/code/evaluate.py
old mode 100644
new mode 100755
index f2f125a6f..cec27ead3
--- a/junior_class/chapter-6-sentiment_classification/code/evaluate.py
+++ b/junior_class/chapter-6-sentiment_classification/code/evaluate.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
from model import sentiment_classifier
from utils import data_processor
@@ -89,12 +88,15 @@ def evaluate(model, test_loader):
model_save_path = "./data/sentiment_classifiter.pdparams"
# 加载数据集
- dataset, word2id_dict = get_dataset(dataset_save_path, dict_save_path, is_training=False)
- data_loader = data_processor.build_batch(word2id_dict, dataset, batch_size, epoch_num, max_seq_len)
+ dataset, word2id_dict = get_dataset(
+ dataset_save_path, dict_save_path, is_training=False)
+ data_loader = data_processor.build_batch(word2id_dict, dataset, batch_size,
+ epoch_num, max_seq_len)
# 初始化并加载训练好的模型
vocab_size = len(word2id_dict.keys())
- sentiment_classifier = sentiment_classifier.SentimentClassifier(embedding_size, vocab_size, num_steps=max_seq_len, num_layers=1)
+ sentiment_classifier = sentiment_classifier.SentimentClassifier(
+ embedding_size, vocab_size, num_steps=max_seq_len, num_layers=1)
saved_state = paddle.load(model_save_path)
sentiment_classifier.load_dict(saved_state)
diff --git a/junior_class/chapter-6-sentiment_classification/code/model/sentiment_classifier.py b/junior_class/chapter-6-sentiment_classification/code/model/sentiment_classifier.py
old mode 100644
new mode 100755
index 749750ddc..d990cfac6
--- a/junior_class/chapter-6-sentiment_classification/code/model/sentiment_classifier.py
+++ b/junior_class/chapter-6-sentiment_classification/code/model/sentiment_classifier.py
@@ -12,14 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
import paddle.nn.functional as F
from paddle.nn import LSTM, Embedding, Dropout, Linear
import numpy as np
+
class SentimentClassifier(paddle.nn.Layer):
- def __init__(self, hidden_size, vocab_size, class_num=2, num_steps=128, num_layers=1, init_scale=0.1, dropout=None):
+ def __init__(self,
+ hidden_size,
+ vocab_size,
+ class_num=2,
+ num_steps=128,
+ num_layers=1,
+ init_scale=0.1,
+ dropout=None):
# 参数含义如下:
# 1.hidden_size,表示embedding-size,hidden和cell向量的维度
@@ -41,11 +48,19 @@ def __init__(self, hidden_size, vocab_size, class_num=2, num_steps=128, num_laye
self.dropout = dropout
# 声明一个LSTM模型,用来把每个句子抽象成向量
- self.simple_lstm_rnn = LSTM(input_size=hidden_size, hidden_size=hidden_size, num_layers=num_layers)
+ self.simple_lstm_rnn = LSTM(
+ input_size=hidden_size,
+ hidden_size=hidden_size,
+ num_layers=num_layers)
# 声明一个embedding层,用来把句子中的每个词转换为向量
- self.embedding = Embedding(num_embeddings=vocab_size, embedding_dim=hidden_size, sparse=False,
- weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Uniform(low=-init_scale, high=init_scale)))
+ self.embedding = Embedding(
+ num_embeddings=vocab_size,
+ embedding_dim=hidden_size,
+ sparse=False,
+ weight_attr=paddle.ParamAttr(
+ initializer=paddle.nn.initializer.Uniform(
+ low=-init_scale, high=init_scale)))
# 在得到一个句子的向量表示后,需要根据这个向量表示对这个句子进行分类
# 一般来说,可以把这个句子的向量表示乘以一个大小为[self.hidden_size, self.class_num]的W参数,
@@ -53,8 +68,11 @@ def __init__(self, hidden_size, vocab_size, class_num=2, num_steps=128, num_laye
# 我们需要声明最终在使用句子向量映射到具体情感类别过程中所需要使用的参数
# 这个参数的大小一般是[self.hidden_size, self.class_num]
- self.cls_fc = Linear(in_features=self.hidden_size, out_features=self.class_num,
- weight_attr=None, bias_attr=None)
+ self.cls_fc = Linear(
+ in_features=self.hidden_size,
+ out_features=self.class_num,
+ weight_attr=None,
+ bias_attr=None)
self.dropout_layer = Dropout(p=self.dropout, mode='upscale_in_train')
def forward(self, input, label):
@@ -86,7 +104,8 @@ def forward(self, input, label):
x_emb = self.dropout_layer(x_emb)
# 使用LSTM网络,把每个句子转换为向量表示
- rnn_out, (last_hidden, last_cell) = self.simple_lstm_rnn(x_emb, (init_h, init_c))
+ rnn_out, (last_hidden, last_cell) = self.simple_lstm_rnn(x_emb, (
+ init_h, init_c))
last_hidden = paddle.reshape(
last_hidden[-1], shape=[-1, self.hidden_size])
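The constructor above wires embedding -> LSTM -> final hidden state -> linear classifier, and `forward` reshapes `last_hidden[-1]` (the top layer's final state) to `[batch, hidden_size]` before the FC layer. A stripped-down runnable sketch of the same pattern (assuming Paddle 2.x; toy sizes, no dropout, labels omitted):

import paddle
import paddle.nn.functional as F

class TinyClassifier(paddle.nn.Layer):
    def __init__(self, vocab_size=100, hidden_size=32, class_num=2):
        super().__init__()
        self.embedding = paddle.nn.Embedding(vocab_size, hidden_size)
        self.lstm = paddle.nn.LSTM(
            input_size=hidden_size, hidden_size=hidden_size, num_layers=1)
        self.fc = paddle.nn.Linear(hidden_size, class_num)

    def forward(self, token_ids):               # [batch, seq_len], int64
        x = self.embedding(token_ids)           # [batch, seq_len, hidden]
        _, (last_hidden, _) = self.lstm(x)      # [num_layers, batch, hidden]
        return self.fc(last_hidden[-1])         # logits: [batch, class_num]

model = TinyClassifier()
logits = model(paddle.randint(0, 100, [4, 10]))
print(F.softmax(logits, axis=-1).shape)         # [4, 2]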
diff --git a/junior_class/chapter-6-sentiment_classification/code/train.py b/junior_class/chapter-6-sentiment_classification/code/train.py
old mode 100644
new mode 100755
index 0b9a05d97..4c05cad22
--- a/junior_class/chapter-6-sentiment_classification/code/train.py
+++ b/junior_class/chapter-6-sentiment_classification/code/train.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import paddle
from model import sentiment_classifier
@@ -32,6 +31,7 @@ def get_dataset(data_path, is_training=True):
return corpus, word2id_dict
+
def train(model, train_loader):
model.train()
@@ -41,7 +41,11 @@ def train(model, train_loader):
paddle.set_device('gpu:0')
# 创建优化器Optimizer,用于更新这个网络的参数
- optimizer = paddle.optimizer.Adam(learning_rate=0.01, beta1=0.9, beta2=0.999, parameters= model.parameters())
+ optimizer = paddle.optimizer.Adam(
+ learning_rate=0.01,
+ beta1=0.9,
+ beta2=0.999,
+ parameters=model.parameters())
# 开始训练
for step, (sentences, labels) in enumerate(train_loader):
@@ -74,16 +78,19 @@ def train(model, train_loader):
dataset_save_path = "./data/aclImdb_v1.tar.gz"
dataset_download_path = "https://dataset.bj.bcebos.com/imdb%2FaclImdb_v1.tar.gz"
if not os.path.exists(dataset_save_path):
- data_processor.download(save_path=dataset_save_path, corpus_url=dataset_download_path)
+ data_processor.download(
+ save_path=dataset_save_path, corpus_url=dataset_download_path)
# 加载数据集
dataset, word2id_dict = get_dataset(dataset_save_path, is_training=True)
- data_loader = data_processor.build_batch(word2id_dict, dataset, batch_size, epoch_num, max_seq_len)
+ data_loader = data_processor.build_batch(word2id_dict, dataset, batch_size,
+ epoch_num, max_seq_len)
# 初始化要训练的模型
vocab_size = len(word2id_dict.keys())
- sentiment_classifier = sentiment_classifier.SentimentClassifier(embedding_size, vocab_size, num_steps=max_seq_len, num_layers=1)
+ sentiment_classifier = sentiment_classifier.SentimentClassifier(
+ embedding_size, vocab_size, num_steps=max_seq_len, num_layers=1)
# 训练模型
train(sentiment_classifier, data_loader)
@@ -91,6 +98,6 @@ def train(model, train_loader):
# 保存词典和模型
with open(dict_save_path, "w") as f:
for word_id, word in enumerate(word2id_dict.keys()):
- f.write(word+"\t"+str(word_id)+"\n")
+ f.write(word + "\t" + str(word_id) + "\n")
- paddle.save(sentiment_classifier.state_dict(), model_save_path)
\ No newline at end of file
+ paddle.save(sentiment_classifier.state_dict(), model_save_path)
diff --git a/junior_class/chapter-6-sentiment_classification/code/utils/data_processor.py b/junior_class/chapter-6-sentiment_classification/code/utils/data_processor.py
old mode 100644
new mode 100755
index e44fd8c01..423778dc1
--- a/junior_class/chapter-6-sentiment_classification/code/utils/data_processor.py
+++ b/junior_class/chapter-6-sentiment_classification/code/utils/data_processor.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import re
import tarfile
@@ -22,7 +21,9 @@
# 下载语料用来训练word2vec
-def download(save_path, corpus_url="https://dataset.bj.bcebos.com/imdb%2FaclImdb_v1.tar.gz"):
+def download(
+ save_path,
+ corpus_url="https://dataset.bj.bcebos.com/imdb%2FaclImdb_v1.tar.gz"):
# 通过python的requests类,下载数据集
web_request = requests.get(corpus_url)
corpus = web_request.content
@@ -84,7 +85,8 @@ def build_dict(corpus):
word_freq_dict[word] = 0
word_freq_dict[word] += 1
- word_freq_dict = sorted(word_freq_dict.items(), key=lambda x: x[1], reverse=True)
+ word_freq_dict = sorted(
+ word_freq_dict.items(), key=lambda x: x[1], reverse=True)
word2id_dict = dict()
word2id_freq = dict()
@@ -110,13 +112,23 @@ def convert_corpus_to_id(corpus, word2id_dict):
# 将句子中的词逐个替换成id,如果句子中的词不在词表内,则替换成oov
# 这里需要注意,一般来说我们可能需要查看一下test-set中,句子oov的比例,
# 如果存在过多oov的情况,那就说明我们的训练数据不足或者切分存在巨大偏差,需要调整
- sentence = [word2id_dict[word] if word in word2id_dict else word2id_dict['[oov]'] for word in sentence]
+ sentence = [
+ word2id_dict[word]
+ if word in word2id_dict else word2id_dict['[oov]']
+ for word in sentence
+ ]
data_set.append((sentence, sentence_label))
return data_set
# 编写一个迭代器,每次调用这个迭代器都会返回一个新的batch,用于训练或者预测
-def build_batch(word2id_dict, corpus, batch_size, epoch_num, max_seq_len, shuffle=True, drop_last=True):
+def build_batch(word2id_dict,
+ corpus,
+ batch_size,
+ epoch_num,
+ max_seq_len,
+ shuffle=True,
+ drop_last=True):
# 模型将会接受的两个输入:
# 1. 一个形状为[batch_size, max_seq_len]的张量,sentence_batch,代表了一个mini-batch的句子。
# 2. 一个形状为[batch_size, 1]的张量,sentence_label_batch,每个元素都是非0即1,代表了每个句子的情感类别(正向或者负向)
@@ -142,11 +154,13 @@ def build_batch(word2id_dict, corpus, batch_size, epoch_num, max_seq_len, shuffl
sentence_label_batch.append([sentence_label])
if len(sentence_batch) == batch_size:
- yield np.array(sentence_batch).astype("int64"), np.array(sentence_label_batch).astype("int64")
+ yield np.array(sentence_batch).astype("int64"), np.array(
+ sentence_label_batch).astype("int64")
sentence_batch = []
sentence_label_batch = []
if not drop_last and len(sentence_batch) > 0:
- yield np.array(sentence_batch).astype("int64"), np.array(sentence_label_batch).astype("int64")
+ yield np.array(sentence_batch).astype("int64"), np.array(
+ sentence_label_batch).astype("int64")
# 加载词典
@@ -157,4 +171,4 @@ def load_dict(path):
word, word_id = line.rstrip().rsplit("\t", maxsplit=1)
word2id_dict[word] = int(word_id)
- return word2id_dict
\ No newline at end of file
+ return word2id_dict
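The reflowed comprehension maps each word to its id and falls back to the `[oov]` id for unknown words; `dict.get` expresses the same lookup more compactly. A tiny sketch with a hypothetical vocabulary:

word2id_dict = {'[oov]': 0, 'good': 1, 'movie': 2}
sentence = ['good', 'weird', 'movie']
ids = [word2id_dict.get(word, word2id_dict['[oov]']) for word in sentence]
print(ids)  # [1, 0, 2] -- 'weird' is out of vocabulary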
diff --git a/junior_class/chapter-6-sentiment_classification/notebook/6-1-sentiment_classification.ipynb b/junior_class/chapter-6-sentiment_classification/notebook/6-1-sentiment_classification.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-7-Recommendation_System/README.md b/junior_class/chapter-7-Recommendation_System/README.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-7-Recommendation_System/README_en.md b/junior_class/chapter-7-Recommendation_System/README_en.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-7-Recommendation_System/code/README.md b/junior_class/chapter-7-Recommendation_System/code/README.md
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-7-Recommendation_System/code/README_en.md b/junior_class/chapter-7-Recommendation_System/code/README_en.md
old mode 100644
new mode 100755
index dcfa0945d..d639e83ff
--- a/junior_class/chapter-7-Recommendation_System/code/README_en.md
+++ b/junior_class/chapter-7-Recommendation_System/code/README_en.md
@@ -32,4 +32,3 @@ unzip -q ml-1m.zip
'''
python3 train.py
'''
-
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-1-user_data.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-1-user_data.py
old mode 100644
new mode 100755
index 2133b8018..d0669bfe2
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-1-user_data.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-1-user_data.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
# 解压数据集
# unzip -o -q -d ~/work/ ~/data/data19736/ml-1m.zip
@@ -22,17 +21,18 @@
with open(usr_file, 'r') as f:
data = f.readlines()
# 打印data的数据长度、第一条数据、数据类型
-print("data 数据长度是:",len(data))
+print("data 数据长度是:", len(data))
print("第一条数据是:", data[0])
print("数据类型:", type(data[0]))
def gender2num(gender):
return 1 if gender == 'F' else 0
+
+
print("性别M用数字 {} 表示".format(gender2num('M')))
print("性别F用数字 {} 表示".format(gender2num('F')))
-
usr_info = {}
max_usr_id = 0
#按行索引数据
@@ -41,26 +41,30 @@ def gender2num(gender):
item = item.strip().split("::")
usr_id = item[0]
# 将字符数据转成数字并保存在字典中
- usr_info[usr_id] = {'usr_id': int(usr_id),
- 'gender': gender2num(item[1]),
- 'age': int(item[2]),
- 'job': int(item[3])}
+ usr_info[usr_id] = {
+ 'usr_id': int(usr_id),
+ 'gender': gender2num(item[1]),
+ 'age': int(item[2]),
+ 'job': int(item[3])
+ }
max_usr_id = max(max_usr_id, int(usr_id))
print("用户ID为3的用户数据是:", usr_info['3'])
import numpy as np
+
+
def get_usr_info(path):
# 性别转换函数,M-0, F-1
def gender2num(gender):
return 1 if gender == 'F' else 0
-
+
# 打开文件,读取所有行到data中
with open(path, 'r') as f:
data = f.readlines()
# 建立用户信息的字典
use_info = {}
-
+
max_usr_id = 0
#按行索引数据
for item in data:
@@ -68,18 +72,19 @@ def gender2num(gender):
item = item.strip().split("::")
usr_id = item[0]
# 将字符数据转成数字并保存在字典中
- use_info[usr_id] = {'usr_id': int(usr_id),
- 'gender': gender2num(item[1]),
- 'age': int(item[2]),
- 'job': int(item[3])}
+ use_info[usr_id] = {
+ 'usr_id': int(usr_id),
+ 'gender': gender2num(item[1]),
+ 'age': int(item[2]),
+ 'job': int(item[3])
+ }
max_usr_id = max(max_usr_id, int(usr_id))
-
+
return use_info, max_usr_id
+
usr_file = "./work/ml-1m/users.dat"
usr_info, max_usr_id = get_usr_info(usr_file)
print("用户数量:", len(usr_info))
print("最大用户ID:", max_usr_id)
print("第1个用户的信息是:", usr_info['1'])
-
-
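Each line of `users.dat` follows the ML-1M layout `UserID::Gender::Age::Occupation::Zip-code`, which the loop above splits on `::` and converts field by field. A one-record check of the parsing (sample line in that format):

def gender2num(gender):
    return 1 if gender == 'F' else 0

item = "1::F::1::10::48067".strip().split("::")
print({'usr_id': int(item[0]), 'gender': gender2num(item[1]),
       'age': int(item[2]), 'job': int(item[3])})
# {'usr_id': 1, 'gender': 1, 'age': 1, 'job': 10}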
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-2-movie_data.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-2-movie_data.py
old mode 100644
new mode 100755
index 2fc1bbece..79f01522a
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-2-movie_data.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-2-movie_data.py
@@ -26,12 +26,11 @@
print("movie year:", item[1][-5:-1])
print("movie genre:", item[2].split('|'))
-
movie_info_path = "./work/ml-1m/movies.dat"
# 打开文件,编码方式选择ISO-8859-1,读取所有数据到data中
with open(movie_info_path, 'r', encoding="ISO-8859-1") as f:
data = f.readlines()
-
+
movie_info = {}
for item in data:
item = item.strip().split("::")
@@ -52,25 +51,27 @@
item = item.strip().split("::")
# 1. 获得电影的ID信息
v_id = item[0]
- v_title = item[1][:-7] # 去掉title中年份数据
+ v_title = item[1][:-7] # 去掉title中年份数据
v_year = item[1][-5:-1]
titles = v_title.split()
# 获得title最大长度
max_title_length = max((max_title_length, len(titles)))
-
+
# 2. 统计电影名字的单词,并给每个单词一个序号,放在movie_titles中
for t in titles:
if t not in movie_titles:
movie_titles[t] = t_count
t_count += 1
-
+
v_tit = [movie_titles[k] for k in titles]
# 保存电影ID数据和title数据到字典中
- movie_info[v_id] = {'mov_id': int(v_id),
- 'title': v_tit,
- 'years': int(v_year)}
-
-print("最大电影title长度是:", max_title_length)
+ movie_info[v_id] = {
+ 'mov_id': int(v_id),
+ 'title': v_tit,
+ 'years': int(v_year)
+ }
+
+print("最大电影title长度是:", max_title_length)
ID = 1
# 读取第一条数据,并打印
item = data[0]
@@ -79,7 +80,6 @@
print("电影 title:", item[1][:-7])
print("ID为1 的电影数据是:", movie_info['1'])
-
# 用于记录电影类别每个单词对应哪个序号
movie_titles, movie_cat = {}, {}
max_title_length = 0
@@ -95,7 +95,7 @@
# 获得电影类别数量的最大长度
max_cat_length = max((max_cat_length, len(cats)))
-
+
v_cat = item[2].split('|')
# 3. 统计电影类别单词,并给每个单词一个序号,放在movie_cat中
for cat in cats:
@@ -103,12 +103,11 @@
movie_cat[cat] = c_count
c_count += 1
v_cat = [movie_cat[k] for k in v_cat]
-
+
# 保存电影ID数据和title数据到字典中
- movie_info[v_id] = {'mov_id': int(v_id),
- 'category': v_cat}
-
-print("电影类别数量最多是:", max_cat_length)
+ movie_info[v_id] = {'mov_id': int(v_id), 'category': v_cat}
+
+print("电影类别数量最多是:", max_cat_length)
ID = 1
# 读取第一条数据,并打印
item = data[0]
@@ -128,7 +127,7 @@
item = item.strip().split("::")
# 1. 获得电影的ID信息
v_id = item[0]
- v_title = item[1][:-7] # 去掉title中年份数据
+ v_title = item[1][:-7] # 去掉title中年份数据
cats = item[2].split('|')
v_year = item[1][-5:-1]
@@ -145,23 +144,26 @@
c_count += 1
# 补0使电影名称对应的列表长度为15
v_tit = [movie_titles[k] for k in titles]
- while len(v_tit)<15:
+ while len(v_tit) < 15:
v_tit.append(0)
# 补0使电影种类对应的列表长度为6
v_cat = [movie_cat[k] for k in cats]
- while len(v_cat)<6:
+ while len(v_cat) < 6:
v_cat.append(0)
# 4. 保存电影数据到movie_info中
- movie_info[v_id] = {'mov_id': int(v_id),
- 'title': v_tit,
- 'category': v_cat,
- 'years': int(v_year)}
-
+ movie_info[v_id] = {
+ 'mov_id': int(v_id),
+ 'title': v_tit,
+ 'category': v_cat,
+ 'years': int(v_year)
+ }
+
print("电影数据数量:", len(movie_info))
ID = 2
-print("原始的电影ID为 {} 的数据是:".format(ID), data[ID-1])
+print("原始的电影ID为 {} 的数据是:".format(ID), data[ID - 1])
print("电影ID为 {} 的转换后数据是:".format(ID), movie_info[str(ID)])
+
def get_movie_info(path):
# 打开文件,编码方式选择ISO-8859-1,读取所有数据到data中
with open(path, 'r', encoding="ISO-8859-1") as f:
@@ -195,17 +197,19 @@ def get_movie_info(path):
c_count += 1
# 补0使电影名称对应的列表长度为15
v_tit = [movie_titles[k] for k in titles]
- while len(v_tit)<15:
+ while len(v_tit) < 15:
v_tit.append(0)
# 补0使电影种类对应的列表长度为6
v_cat = [movie_cat[k] for k in cats]
- while len(v_cat)<6:
+ while len(v_cat) < 6:
v_cat.append(0)
# 保存电影数据到movie_info中
- movie_info[v_id] = {'mov_id': int(v_id),
- 'title': v_tit,
- 'category': v_cat,
- 'years': int(v_year)}
+ movie_info[v_id] = {
+ 'mov_id': int(v_id),
+ 'title': v_tit,
+ 'category': v_cat,
+ 'years': int(v_year)
+ }
return movie_info, movie_cat, movie_titles
@@ -213,11 +217,10 @@ def get_movie_info(path):
movie_info, movie_cat, movie_titles = get_movie_info(movie_info_path)
print("电影数量:", len(movie_info))
ID = 1
-print("原始的电影ID为 {} 的数据是:".format(ID), data[ID-1])
+print("原始的电影ID为 {} 的数据是:".format(ID), data[ID - 1])
print("电影ID为 {} 的转换后数据是:".format(ID), movie_info[str(ID)])
-print("电影种类对应序号:'Animation':{} 'Children's':{} 'Comedy':{}".format(movie_cat['Animation'],
- movie_cat["Children's"],
- movie_cat['Comedy']))
-print("电影名称对应序号:'The':{} 'Story':{} ".format(movie_titles['The'], movie_titles['Story']))
-
+print("电影种类对应序号:'Animation':{} 'Children's':{} 'Comedy':{}".format(movie_cat[
+ 'Animation'], movie_cat["Children's"], movie_cat['Comedy']))
+print("电影名称对应序号:'The':{} 'Story':{} ".format(movie_titles['The'], movie_titles[
+ 'Story']))
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-3-ratings_data.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-3-ratings_data.py
old mode 100644
new mode 100755
index ce0bcac2d..590a69ff6
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-3-ratings_data.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-3-ratings_data.py
@@ -26,7 +26,7 @@
print(item)
item = item.strip().split("::")
-usr_id,movie_id,score = item[0],item[1],item[2]
+usr_id, movie_id, score = item[0], item[1], item[2]
print("评分数据条数:", len(data))
print("用户ID:", usr_id)
print("电影ID:", movie_id)
@@ -42,16 +42,15 @@ def get_rating_info(path):
for item in data:
item = item.strip().split("::")
# 处理每行数据,分别得到用户ID,电影ID,和评分
- usr_id,movie_id,score = item[0],item[1],item[2]
+ usr_id, movie_id, score = item[0], item[1], item[2]
if usr_id not in rating_info.keys():
- rating_info[usr_id] = {movie_id:float(score)}
+ rating_info[usr_id] = {movie_id: float(score)}
else:
rating_info[usr_id][movie_id] = float(score)
return rating_info
+
# 获得评分数据
#rating_path = "./work/ml-1m/ratings.dat"
rating_info = get_rating_info(rating_path)
print("ID为1的用户一共评价了{}个电影".format(len(rating_info['1'])))
-
-
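`get_rating_info` builds a nested dict keyed by user id, then movie id; the if/else it uses is equivalent to `dict.setdefault`. A compact sketch over three hypothetical rating lines (`UserID::MovieID::Rating::Timestamp`):

lines = ["1::1193::5::978300760", "1::661::3::978302109", "2::1193::4::978298413"]
rating_info = {}
for item in lines:
    usr_id, movie_id, score = item.strip().split("::")[:3]
    rating_info.setdefault(usr_id, {})[movie_id] = float(score)
print(rating_info["1"])  # {'1193': 5.0, '661': 3.0}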
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-4-poster_data.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-4-poster_data.py
old mode 100644
new mode 100755
index f3cfbae05..ff5ed31d9
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-4-poster_data.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-4-poster_data.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
# %matplotlib inline
from PIL import Image
import matplotlib.pyplot as plt
@@ -23,15 +22,15 @@
rating_path = "./work/ml-1m/new_rating.txt"
else:
rating_path = "./work/ml-1m/ratings.dat"
-
+
with open(rating_path, 'r') as f:
data = f.readlines()
-
+
# 从新的rating文件中收集所有的电影ID
mov_id_collect = []
for item in data:
item = item.strip().split("::")
- usr_id,movie_id,score = item[0],item[1],item[2]
+ usr_id, movie_id, score = item[0], item[1], item[2]
mov_id_collect.append(movie_id)
# 根据电影ID读取图像
@@ -40,10 +39,11 @@
# 显示mov_id_collect中第几个电影ID的图像
idx = 1
-poster = Image.open(poster_path+'mov_id{}.jpg'.format(str(mov_id_collect[idx])))
+poster = Image.open(poster_path + 'mov_id{}.jpg'.format(
+ str(mov_id_collect[idx])))
# poster = poster.resize([64, 64])
-plt.figure("Image") # 图像窗口名称
+plt.figure("Image") # 图像窗口名称
plt.imshow(poster)
-plt.axis('on') # 关掉坐标轴为 off
-plt.title("poster with ID {}".format(mov_id_collect[idx])) # 图像题目
-plt.show()
\ No newline at end of file
+plt.axis('on') # 关掉坐标轴为 off
+plt.title("poster with ID {}".format(mov_id_collect[idx])) # 图像题目
+plt.show()
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-5-creat_data_reader.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-5-creat_data_reader.py
old mode 100644
new mode 100755
index 23ac70102..9bc68431c
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-5-creat_data_reader.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-5-creat_data_reader.py
@@ -13,17 +13,19 @@
# limitations under the License.
import numpy as np
+
+
def get_usr_info(path):
# 性别转换函数,M-0, F-1
def gender2num(gender):
return 1 if gender == 'F' else 0
-
+
# 打开文件,读取所有行到data中
with open(path, 'r') as f:
data = f.readlines()
# 建立用户信息的字典
use_info = {}
-
+
max_usr_id = 0
#按行索引数据
for item in data:
@@ -31,14 +33,17 @@ def gender2num(gender):
item = item.strip().split("::")
usr_id = item[0]
# 将字符数据转成数字并保存在字典中
- use_info[usr_id] = {'usr_id': int(usr_id),
- 'gender': gender2num(item[1]),
- 'age': int(item[2]),
- 'job': int(item[3])}
+ use_info[usr_id] = {
+ 'usr_id': int(usr_id),
+ 'gender': gender2num(item[1]),
+ 'age': int(item[2]),
+ 'job': int(item[3])
+ }
max_usr_id = max(max_usr_id, int(usr_id))
-
+
return use_info, max_usr_id
+
usr_file = "./work/ml-1m/users.dat"
usr_info, max_usr_id = get_usr_info(usr_file)
@@ -76,17 +81,19 @@ def get_movie_info(path):
c_count += 1
# 补0使电影名称对应的列表长度为15
v_tit = [movie_titles[k] for k in titles]
- while len(v_tit)<15:
+ while len(v_tit) < 15:
v_tit.append(0)
# 补0使电影种类对应的列表长度为6
v_cat = [movie_cat[k] for k in cats]
- while len(v_cat)<6:
+ while len(v_cat) < 6:
v_cat.append(0)
# 保存电影数据到movie_info中
- movie_info[v_id] = {'mov_id': int(v_id),
- 'title': v_tit,
- 'category': v_cat,
- 'years': int(v_year)}
+ movie_info[v_id] = {
+ 'mov_id': int(v_id),
+ 'title': v_tit,
+ 'category': v_cat,
+ 'years': int(v_year)
+ }
return movie_info, movie_cat, movie_titles
@@ -103,13 +110,14 @@ def get_rating_info(path):
for item in data:
item = item.strip().split("::")
# 处理每行数据,分别得到用户ID,电影ID,和评分
- usr_id,movie_id,score = item[0],item[1],item[2]
+ usr_id, movie_id, score = item[0], item[1], item[2]
if usr_id not in rating_info.keys():
- rating_info[usr_id] = {movie_id:float(score)}
+ rating_info[usr_id] = {movie_id: float(score)}
else:
rating_info[usr_id][movie_id] = float(score)
return rating_info
+
# 获得评分数据
rating_path = "./work/ml-1m/ratings.dat"
rating_info = get_rating_info(rating_path)
@@ -121,40 +129,46 @@ def get_dataset(usr_info, rating_info, movie_info):
for usr_id in rating_info.keys():
usr_ratings = rating_info[usr_id]
for movie_id in usr_ratings:
- trainset.append({'usr_info': usr_info[usr_id],
- 'mov_info': movie_info[movie_id],
- 'scores': usr_ratings[movie_id]})
+ trainset.append({
+ 'usr_info': usr_info[usr_id],
+ 'mov_info': movie_info[movie_id],
+ 'scores': usr_ratings[movie_id]
+ })
return trainset
+
dataset = get_dataset(usr_info, rating_info, movie_info)
print("数据集总数据数:", len(dataset))
-
import random
+
+
def load_data(dataset=None, mode='train'):
-
"""定义一些超参数等等"""
-
+
# 定义数据迭代加载器
def data_generator():
-
""" 定义数据的处理过程"""
-
- data = None
+
+ data = None
yield data
-
+
# 返回数据迭代加载器
return data_generator
+
import random
use_poster = False
+
+
def load_data(dataset=None, mode='train'):
-
+
# 定义数据迭代Batch大小
BATCHSIZE = 256
data_length = len(dataset)
index_list = list(range(data_length))
+
# 定义数据迭代加载器
def data_generator():
# 训练模式下,打乱训练数据
@@ -179,7 +193,8 @@ def data_generator():
if use_poster:
# 不使用图像特征时,不读取图像数据,加快数据读取速度
- poster = Image.open(poster_path+'mov_id{}.jpg'.format(str(mov_id)))
+ poster = Image.open(poster_path + 'mov_id{}.jpg'.format(
+ str(mov_id)))
poster = poster.resize([64, 64])
if len(poster.size) <= 2:
poster = poster.convert("RGB")
@@ -188,7 +203,7 @@ def data_generator():
score_list.append(int(dataset[i]['scores']))
# 如果读取的数据量达到当前的batch大小,就返回当前批次
- if len(usr_id_list)==BATCHSIZE:
+ if len(usr_id_list) == BATCHSIZE:
# 转换列表数据为数组形式,reshape到固定形状
usr_id_arr = np.array(usr_id_list)
usr_gender_arr = np.array(usr_gender_list)
@@ -197,42 +212,51 @@ def data_generator():
mov_id_arr = np.array(mov_id_list)
- mov_cat_arr = np.reshape(np.array(mov_cat_list), [BATCHSIZE, 6]).astype(np.int64)
- mov_tit_arr = np.reshape(np.array(mov_tit_list), [BATCHSIZE, 1, 15]).astype(np.int64)
+ mov_cat_arr = np.reshape(
+ np.array(mov_cat_list), [BATCHSIZE, 6]).astype(np.int64)
+ mov_tit_arr = np.reshape(
+ np.array(mov_tit_list),
+ [BATCHSIZE, 1, 15]).astype(np.int64)
if use_poster:
- mov_poster_arr = np.reshape(np.array(mov_poster_list)/127.5 - 1, [BATCHSIZE, 3, 64, 64]).astype(np.float32)
+ mov_poster_arr = np.reshape(
+ np.array(mov_poster_list) / 127.5 - 1,
+ [BATCHSIZE, 3, 64, 64]).astype(np.float32)
else:
mov_poster_arr = np.array([0.])
-
- scores_arr = np.reshape(np.array(score_list), [-1, 1]).astype(np.float32)
-
+
+ scores_arr = np.reshape(np.array(score_list),
+ [-1, 1]).astype(np.float32)
+
# 返回当前批次数据
yield [usr_id_arr, usr_gender_arr, usr_age_arr, usr_job_arr], \
[mov_id_arr, mov_cat_arr, mov_tit_arr, mov_poster_arr], scores_arr
-
+
# 清空数据
usr_id_list, usr_gender_list, usr_age_list, usr_job_list = [], [], [], []
mov_id_list, mov_tit_list, mov_cat_list, score_list = [], [], [], []
mov_poster_list = []
+
return data_generator
+
dataset = get_dataset(usr_info, rating_info, movie_info)
print("数据集总数量:", len(dataset))
-trainset = dataset[:int(0.8*len(dataset))]
+trainset = dataset[:int(0.8 * len(dataset))]
train_loader = load_data(trainset, mode="train")
print("训练集数量:", len(trainset))
-validset = dataset[int(0.8*len(dataset)):]
+validset = dataset[int(0.8 * len(dataset)):]
valid_loader = load_data(validset, mode='valid')
print("验证集数量:", len(validset))
for idx, data in enumerate(train_loader()):
usr_data, mov_data, score = data
-
+
usr_id_arr, usr_gender_arr, usr_age_arr, usr_job_arr = usr_data
mov_id_arr, mov_cat_arr, mov_tit_arr, mov_poster_arr = mov_data
print("用户ID数据尺寸", usr_id_arr.shape)
- print("电影ID数据尺寸", mov_id_arr.shape, ", 电影类别genres数据的尺寸", mov_cat_arr.shape, ", 电影名字title的尺寸", mov_tit_arr.shape)
+ print("电影ID数据尺寸", mov_id_arr.shape, ", 电影类别genres数据的尺寸", mov_cat_arr.shape,
+ ", 电影名字title的尺寸", mov_tit_arr.shape)
break
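Behind all the rewrapping, the generator's contract is fixed output shapes per batch: padded category ids as `[BATCHSIZE, 6]`, padded title ids as `[BATCHSIZE, 1, 15]` (the extra axis is the channel dim the title Conv2D expects), and scores as `[-1, 1]` float32. The reshapes in isolation:

import numpy as np

BATCHSIZE = 4
mov_cat_list = [[1, 2, 3, 0, 0, 0]] * BATCHSIZE
mov_tit_list = [[5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] * BATCHSIZE
score_list = [3, 4, 5, 2]

mov_cat_arr = np.reshape(np.array(mov_cat_list), [BATCHSIZE, 6]).astype(np.int64)
mov_tit_arr = np.reshape(np.array(mov_tit_list),
                         [BATCHSIZE, 1, 15]).astype(np.int64)
scores_arr = np.reshape(np.array(score_list), [-1, 1]).astype(np.float32)
print(mov_cat_arr.shape, mov_tit_arr.shape, scores_arr.shape)
# (4, 6) (4, 1, 15) (4, 1)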
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-6-data_process_full_code.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-6-data_process_full_code.py
old mode 100644
new mode 100755
index b2859f95d..3254bac34
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-2-6-data_process_full_code.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-2-6-data_process_full_code.py
@@ -1,4 +1,3 @@
-
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,6 +16,7 @@
import numpy as np
from PIL import Image
+
class MovieLen(object):
def __init__(self, use_poster):
self.use_poster = use_poster
@@ -30,10 +30,12 @@ def __init__(self, use_poster):
movie_info_path = "./work/ml-1m/movies.dat"
self.poster_path = "./work/ml-1m/posters/"
# 得到电影数据
- self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(movie_info_path)
+ self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(
+ movie_info_path)
# 记录电影的最大ID
self.max_mov_cat = np.max([self.movie_cat[k] for k in self.movie_cat])
- self.max_mov_tit = np.max([self.movie_title[k] for k in self.movie_title])
+ self.max_mov_tit = np.max(
+ [self.movie_title[k] for k in self.movie_title])
self.max_mov_id = np.max(list(map(int, self.movie_info.keys())))
# 记录用户数据的最大ID
self.max_usr_id = 0
@@ -44,15 +46,18 @@ def __init__(self, use_poster):
# 得到评分数据
self.rating_info = self.get_rating_info(rating_path)
# 构建数据集
- self.dataset = self.get_dataset(usr_info=self.usr_info,
- rating_info=self.rating_info,
- movie_info=self.movie_info)
+ self.dataset = self.get_dataset(
+ usr_info=self.usr_info,
+ rating_info=self.rating_info,
+ movie_info=self.movie_info)
# 划分数据集,获得数据加载器
- self.train_dataset = self.dataset[:int(len(self.dataset)*0.9)]
- self.valid_dataset = self.dataset[int(len(self.dataset)*0.9):]
+ self.train_dataset = self.dataset[:int(len(self.dataset) * 0.9)]
+ self.valid_dataset = self.dataset[int(len(self.dataset) * 0.9):]
print("##Total dataset instances: ", len(self.dataset))
print("##MovieLens dataset information: \nusr num: {}\n"
- "movies num: {}".format(len(self.usr_info),len(self.movie_info)))
+ "movies num: {}".format(
+ len(self.usr_info), len(self.movie_info)))
+
# 得到电影数据
def get_movie_info(self, path):
# 打开文件,编码方式选择ISO-8859-1,读取所有数据到data中
@@ -85,17 +90,19 @@ def get_movie_info(self, path):
c_count += 1
# 补0使电影名称对应的列表长度为15
v_tit = [movie_titles[k] for k in titles]
- while len(v_tit)<15:
+ while len(v_tit) < 15:
v_tit.append(0)
# 补0使电影种类对应的列表长度为6
v_cat = [movie_cat[k] for k in cats]
- while len(v_cat)<6:
+ while len(v_cat) < 6:
v_cat.append(0)
# 保存电影数据到movie_info中
- movie_info[v_id] = {'mov_id': int(v_id),
- 'title': v_tit,
- 'category': v_cat,
- 'years': int(v_year)}
+ movie_info[v_id] = {
+ 'mov_id': int(v_id),
+ 'title': v_tit,
+ 'category': v_cat,
+ 'years': int(v_year)
+ }
return movie_info, movie_cat, movie_titles
def get_usr_info(self, path):
@@ -116,14 +123,17 @@ def gender2num(gender):
item = item.strip().split("::")
usr_id = item[0]
# 将字符数据转成数字并保存在字典中
- use_info[usr_id] = {'usr_id': int(usr_id),
- 'gender': gender2num(item[1]),
- 'age': int(item[2]),
- 'job': int(item[3])}
+ use_info[usr_id] = {
+ 'usr_id': int(usr_id),
+ 'gender': gender2num(item[1]),
+ 'age': int(item[2]),
+ 'job': int(item[3])
+ }
self.max_usr_id = max(self.max_usr_id, int(usr_id))
self.max_usr_age = max(self.max_usr_age, int(item[2]))
self.max_usr_job = max(self.max_usr_job, int(item[3]))
return use_info
+
# 得到评分数据
def get_rating_info(self, path):
# 读取文件里的数据
@@ -133,23 +143,26 @@ def get_rating_info(self, path):
rating_info = {}
for item in data:
item = item.strip().split("::")
- usr_id,movie_id,score = item[0],item[1],item[2]
+ usr_id, movie_id, score = item[0], item[1], item[2]
if usr_id not in rating_info.keys():
- rating_info[usr_id] = {movie_id:float(score)}
+ rating_info[usr_id] = {movie_id: float(score)}
else:
rating_info[usr_id][movie_id] = float(score)
return rating_info
+
# 构建数据集
def get_dataset(self, usr_info, rating_info, movie_info):
trainset = []
for usr_id in rating_info.keys():
usr_ratings = rating_info[usr_id]
for movie_id in usr_ratings:
- trainset.append({'usr_info': usr_info[usr_id],
- 'mov_info': movie_info[movie_id],
- 'scores': usr_ratings[movie_id]})
+ trainset.append({
+ 'usr_info': usr_info[usr_id],
+ 'mov_info': movie_info[movie_id],
+ 'scores': usr_ratings[movie_id]
+ })
return trainset
-
+
def load_data(self, dataset=None, mode='train'):
use_poster = False
@@ -158,6 +171,7 @@ def load_data(self, dataset=None, mode='train'):
data_length = len(dataset)
index_list = list(range(data_length))
+
# 定义数据迭代加载器
def data_generator():
# 训练模式下,打乱训练数据
@@ -182,7 +196,8 @@ def data_generator():
if use_poster:
# 不使用图像特征时,不读取图像数据,加快数据读取速度
- poster = Image.open(self.poster_path+'mov_id{}.jpg'.format(str(mov_id[0])))
+ poster = Image.open(self.poster_path +
+ 'mov_id{}.jpg'.format(str(mov_id[0])))
poster = poster.resize([64, 64])
if len(poster.size) <= 2:
poster = poster.convert("RGB")
@@ -191,7 +206,7 @@ def data_generator():
score_list.append(int(dataset[i]['scores']))
# 如果读取的数据量达到当前的batch大小,就返回当前批次
- if len(usr_id_list)==BATCHSIZE:
+ if len(usr_id_list) == BATCHSIZE:
# 转换列表数据为数组形式,reshape到固定形状
usr_id_arr = np.array(usr_id_list)
usr_gender_arr = np.array(usr_gender_list)
@@ -199,15 +214,22 @@ def data_generator():
usr_job_arr = np.array(usr_job_list)
mov_id_arr = np.array(mov_id_list)
- mov_cat_arr = np.reshape(np.array(mov_cat_list), [BATCHSIZE, 6]).astype(np.int64)
- mov_tit_arr = np.reshape(np.array(mov_tit_list), [BATCHSIZE, 1, 15]).astype(np.int64)
+ mov_cat_arr = np.reshape(
+ np.array(mov_cat_list),
+ [BATCHSIZE, 6]).astype(np.int64)
+ mov_tit_arr = np.reshape(
+ np.array(mov_tit_list),
+ [BATCHSIZE, 1, 15]).astype(np.int64)
if use_poster:
- mov_poster_arr = np.reshape(np.array(mov_poster_list)/127.5 - 1, [BATCHSIZE, 3, 64, 64]).astype(np.float32)
+ mov_poster_arr = np.reshape(
+ np.array(mov_poster_list) / 127.5 - 1,
+ [BATCHSIZE, 3, 64, 64]).astype(np.float32)
else:
mov_poster_arr = np.array([0.])
- scores_arr = np.reshape(np.array(score_list), [-1, 1]).astype(np.float32)
+ scores_arr = np.reshape(np.array(score_list),
+ [-1, 1]).astype(np.float32)
# 放回当前批次数据
yield [usr_id_arr, usr_gender_arr, usr_age_arr, usr_job_arr], \
@@ -217,8 +239,10 @@ def data_generator():
usr_id_list, usr_gender_list, usr_age_list, usr_job_list = [], [], [], []
mov_id_list, mov_tit_list, mov_cat_list, score_list = [], [], [], []
mov_poster_list = []
+
return data_generator
+
# 声明数据读取类
dataset = MovieLen(False)
# 定义数据读取器
@@ -232,5 +256,5 @@ def data_generator():
print("打印电影ID,名字,类别数据的维度:")
for v in mov:
print(v.shape)
-
- break
\ No newline at end of file
+
+ break
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-1-embedding.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-1-embedding.py
old mode 100644
new mode 100755
index 34f2177de..b109d4180
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-1-embedding.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-1-embedding.py
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-
import paddle
from paddle.nn import Linear, Embedding, Conv2D
import numpy as np
@@ -22,9 +20,7 @@
# 声明用户的最大ID,在此基础上加1(算上数字0)
USR_ID_NUM = 6040 + 1
# 声明Embedding 层,将ID映射为32长度的向量
-usr_emb = Embedding(num_embeddings=USR_ID_NUM,
- embedding_dim=32,
- sparse=False)
+usr_emb = Embedding(num_embeddings=USR_ID_NUM, embedding_dim=32, sparse=False)
# 声明输入数据,将其转成tensor
arr_1 = np.array([1], dtype="int64").reshape((-1))
print(arr_1)
@@ -35,13 +31,10 @@
# 打印结果
print("数字 1 的embedding结果是: ", emb_res.numpy(), "\n形状是:", emb_res.shape)
-
# 声明用户的最大ID,在此基础上加1(算上数字0)
USR_ID_NUM = 10
# 声明Embedding 层,将ID映射为16长度的向量
-usr_emb = Embedding(num_embeddings=USR_ID_NUM,
- embedding_dim=16,
- sparse=False)
+usr_emb = Embedding(num_embeddings=USR_ID_NUM, embedding_dim=16, sparse=False)
# 定义输入数据,输入数据为不超过10的整数,将其转成tensor
arr = np.random.randint(0, 10, (3)).reshape((-1)).astype('int64')
print("输入数据是:", arr)
@@ -60,9 +53,7 @@
init = paddle.nn.initializer.KaimingNormal()
param_attr = paddle.ParamAttr(initializer=init)
-usr_emb2 = Embedding(num_embeddings=USR_ID_NUM,
- embedding_dim=16,
- weight_attr=param_attr)
+usr_emb2 = Embedding(
+ num_embeddings=USR_ID_NUM, embedding_dim=16, weight_attr=param_attr)
emb_res = usr_emb2(arr_pd)
print("\KaimingNormal初始化权重embedding层的映射结果是:", emb_res.numpy())
-
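
At its core, the Embedding layer used in this example is a lookup into a [num_embeddings, embedding_dim] weight matrix: each integer ID selects one row. A short sketch with illustrative sizes, using the same Paddle calls as the script above:

import numpy as np
import paddle
from paddle.nn import Embedding

VOCAB = 10 + 1                      # max ID 10, plus one row for ID 0
emb = Embedding(num_embeddings=VOCAB, embedding_dim=4)

ids = paddle.to_tensor(np.array([0, 3, 10], dtype='int64'))
vecs = emb(ids)                     # shape: [3, 4], one row per input ID
print(vecs.shape)

# the lookup is equivalent to indexing the weight matrix directly
w = emb.weight.numpy()
print(np.allclose(vecs.numpy(), w[[0, 3, 10]]))  # True
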
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-10-movie-feature-merge.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-10-movie-feature-merge.py
old mode 100644
new mode 100755
index d1df255e1..1ab56ccfb
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-10-movie-feature-merge.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-10-movie-feature-merge.py
@@ -24,14 +24,14 @@
mov_emb = Embedding(num_embeddings=MOV_DICT_SIZE, embedding_dim=32)
mov_fc = Linear(32, 32)
-
print("输入的电影ID是:", mov_id_data)
mov_id_data = paddle.to_tensor(mov_id_data)
mov_id_feat = mov_fc(mov_emb(mov_id_data))
mov_id_feat = F.relu(mov_id_feat)
# 自定义一个电影类别数据
-mov_cat_data = np.array(((1, 2, 3, 0, 0, 0), (2, 3, 4, 0, 0, 0))).reshape(2, -1).astype('int64')
+mov_cat_data = np.array(((1, 2, 3, 0, 0, 0),
+ (2, 3, 4, 0, 0, 0))).reshape(2, -1).astype('int64')
# Map the movie category IDs, followed by a Linear layer
MOV_DICT_SIZE = 6 + 1
mov_emb = Embedding(num_embeddings=MOV_DICT_SIZE, embedding_dim=32)
@@ -49,14 +49,21 @@
mov_cat_feat = F.relu(mov_cat_feat)
# 自定义两个电影名称数据
-mov_title_data = np.array(((1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
- (2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))).reshape(2, 1, 15).astype('int64')
+mov_title_data = np.array(((1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ (2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0))).reshape(2, 1, 15).astype('int64')
# 对电影名称做映射,紧接着FC和pool层
MOV_TITLE_DICT_SIZE = 1000 + 1
mov_title_emb = Embedding(num_embeddings=MOV_TITLE_DICT_SIZE, embedding_dim=32)
-mov_title_conv = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=(2, 1), padding=0)
+mov_title_conv = Conv2D(
+ in_channels=1,
+ out_channels=1,
+ kernel_size=(3, 1),
+ stride=(2, 1),
+ padding=0)
# Use a 3 x 1 convolution in place of a fully connected layer
-mov_title_conv2 = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=1, padding=0)
+mov_title_conv2 = Conv2D(
+ in_channels=1, out_channels=1, kernel_size=(3, 1), stride=1, padding=0)
mov_title_data = paddle.to_tensor(mov_title_data)
print("电影名称数据的输入形状: ", mov_title_data.shape)
@@ -77,7 +84,6 @@
mov_title_feat = F.relu(mov_title_feat)
mov_title_feat = paddle.reshape(mov_title_feat, [batch_size, -1])
-
mov_combined = Linear(in_features=96, out_features=200)
# 收集所有的电影特征
_features = [mov_id_feat, mov_cat_feat, mov_title_feat]
@@ -88,4 +94,4 @@
mov_feat = paddle.concat(_features, axis=1)
mov_feat = mov_combined(mov_feat)
mov_feat = F.tanh(mov_feat)
-print("融合后的电影特征维度是:", mov_feat.shape)
\ No newline at end of file
+print("融合后的电影特征维度是:", mov_feat.shape)
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-11-similarity.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-11-similarity.py
old mode 100644
new mode 100755
index 81101a3d1..6770d1586
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-11-similarity.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-11-similarity.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
from paddle.nn import Linear, Embedding, Conv2D
import numpy as np
@@ -24,9 +23,7 @@
USR_ID_NUM = 6040 + 1
# 定义用户ID的embedding层和fc层
-usr_emb = Embedding(num_embeddings=USR_ID_NUM,
- embedding_dim=32,
- sparse=False)
+usr_emb = Embedding(num_embeddings=USR_ID_NUM, embedding_dim=32, sparse=False)
usr_fc = Linear(in_features=32, out_features=32)
usr_id_var = paddle.to_tensor(usr_id_data)
@@ -41,7 +38,7 @@
# 对用户职业信息做映射,并紧接着一个Linear层
# 用户职业的最大ID是20,所以Embedding层size的第一个参数设置为20 + 1 = 21
USR_JOB_DICT_SIZE = 20 + 1
-usr_job_emb = Embedding(num_embeddings=USR_JOB_DICT_SIZE,embedding_dim=16)
+usr_job_emb = Embedding(num_embeddings=USR_JOB_DICT_SIZE, embedding_dim=16)
usr_job_fc = Linear(in_features=16, out_features=16)
usr_job = paddle.to_tensor(usr_job_data)
@@ -57,8 +54,7 @@
# 年龄的最大ID是56,所以Embedding层size的第一个参数设置为56 + 1 = 57
USR_AGE_DICT_SIZE = 56 + 1
-usr_age_emb = Embedding(num_embeddings=USR_AGE_DICT_SIZE,
- embedding_dim=16)
+usr_age_emb = Embedding(num_embeddings=USR_AGE_DICT_SIZE, embedding_dim=16)
usr_age_fc = Linear(in_features=16, out_features=16)
usr_age = paddle.to_tensor(usr_age_data)
@@ -75,8 +71,8 @@
USR_ID_NUM = 2
# 对用户性别信息做映射,并紧接着一个FC层
USR_GENDER_DICT_SIZE = 2
-usr_gender_emb = Embedding(num_embeddings=USR_GENDER_DICT_SIZE,
- embedding_dim=16)
+usr_gender_emb = Embedding(
+ num_embeddings=USR_GENDER_DICT_SIZE, embedding_dim=16)
usr_gender_fc = Linear(in_features=16, out_features=16)
@@ -98,7 +94,6 @@
usr_feat = paddle.concat(_features, axis=1)
usr_feat = F.tanh(usr_combined(usr_feat))
-
# 自定义一个电影ID数据
mov_id_data = np.array((1, 2)).reshape(-1).astype('int64')
# 对电影ID信息做映射,并紧接着一个FC层
@@ -106,14 +101,14 @@
mov_emb = Embedding(num_embeddings=MOV_DICT_SIZE, embedding_dim=32)
mov_fc = Linear(32, 32)
-
print("输入的电影ID是:", mov_id_data)
mov_id_data = paddle.to_tensor(mov_id_data)
mov_id_feat = mov_fc(mov_emb(mov_id_data))
mov_id_feat = F.relu(mov_id_feat)
# 自定义一个电影类别数据
-mov_cat_data = np.array(((1, 2, 3, 0, 0, 0), (2, 3, 4, 0, 0, 0))).reshape(2, -1).astype('int64')
+mov_cat_data = np.array(((1, 2, 3, 0, 0, 0),
+ (2, 3, 4, 0, 0, 0))).reshape(2, -1).astype('int64')
# Map the movie category IDs, followed by a Linear layer
MOV_DICT_SIZE = 6 + 1
mov_emb = Embedding(num_embeddings=MOV_DICT_SIZE, embedding_dim=32)
@@ -131,14 +126,21 @@
mov_cat_feat = F.relu(mov_cat_feat)
# 自定义两个电影名称数据
-mov_title_data = np.array(((1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
- (2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))).reshape(2, 1, 15).astype('int64')
+mov_title_data = np.array(((1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ (2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0))).reshape(2, 1, 15).astype('int64')
# 对电影名称做映射,紧接着FC和pool层
MOV_TITLE_DICT_SIZE = 1000 + 1
mov_title_emb = Embedding(num_embeddings=MOV_TITLE_DICT_SIZE, embedding_dim=32)
-mov_title_conv = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=(2, 1), padding=0)
+mov_title_conv = Conv2D(
+ in_channels=1,
+ out_channels=1,
+ kernel_size=(3, 1),
+ stride=(2, 1),
+ padding=0)
# Use a 3 x 1 convolution in place of a fully connected layer
-mov_title_conv2 = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=1, padding=0)
+mov_title_conv2 = Conv2D(
+ in_channels=1, out_channels=1, kernel_size=(3, 1), stride=1, padding=0)
mov_title_data = paddle.to_tensor(mov_title_data)
print("电影名称数据的输入形状: ", mov_title_data.shape)
@@ -170,11 +172,13 @@
mov_feat = mov_combined(mov_feat)
mov_feat = F.tanh(mov_feat)
+
def similarity(usr_feature, mov_feature):
    res = F.cosine_similarity(usr_feature, mov_feature)
    res = paddle.scale(res, scale=5)
    return usr_feature, mov_feature, res
+
# 使用上文计算得到的用户特征和电影特征计算相似度
_sim = similarity(usr_feat, mov_feat)
-print("相似度是:", np.squeeze(_sim[-1].numpy()))
\ No newline at end of file
+print("相似度是:", np.squeeze(_sim[-1].numpy()))
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-12-movie-model.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-12-movie-model.py
old mode 100644
new mode 100755
index 58271a334..ddc8de7cd
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-12-movie-model.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-12-movie-model.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
from paddle.nn import Linear, Embedding, Conv2D
import numpy as np
@@ -22,6 +21,7 @@
import numpy as np
from PIL import Image
+
class MovieLen(object):
def __init__(self, use_poster):
self.use_poster = use_poster
@@ -35,10 +35,12 @@ def __init__(self, use_poster):
movie_info_path = "./work/ml-1m/movies.dat"
self.poster_path = "./work/ml-1m/posters/"
# 得到电影数据
- self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(movie_info_path)
+ self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(
+ movie_info_path)
# 记录电影的最大ID
self.max_mov_cat = np.max([self.movie_cat[k] for k in self.movie_cat])
- self.max_mov_tit = np.max([self.movie_title[k] for k in self.movie_title])
+ self.max_mov_tit = np.max(
+ [self.movie_title[k] for k in self.movie_title])
self.max_mov_id = np.max(list(map(int, self.movie_info.keys())))
# 记录用户数据的最大ID
self.max_usr_id = 0
@@ -49,15 +51,18 @@ def __init__(self, use_poster):
# 得到评分数据
self.rating_info = self.get_rating_info(rating_path)
# 构建数据集
- self.dataset = self.get_dataset(usr_info=self.usr_info,
- rating_info=self.rating_info,
- movie_info=self.movie_info)
+ self.dataset = self.get_dataset(
+ usr_info=self.usr_info,
+ rating_info=self.rating_info,
+ movie_info=self.movie_info)
# 划分数据集,获得数据加载器
- self.train_dataset = self.dataset[:int(len(self.dataset)*0.9)]
- self.valid_dataset = self.dataset[int(len(self.dataset)*0.9):]
+ self.train_dataset = self.dataset[:int(len(self.dataset) * 0.9)]
+ self.valid_dataset = self.dataset[int(len(self.dataset) * 0.9):]
print("##Total dataset instances: ", len(self.dataset))
print("##MovieLens dataset information: \nusr num: {}\n"
- "movies num: {}".format(len(self.usr_info),len(self.movie_info)))
+ "movies num: {}".format(
+ len(self.usr_info), len(self.movie_info)))
+
# 得到电影数据
def get_movie_info(self, path):
# 打开文件,编码方式选择ISO-8859-1,读取所有数据到data中
@@ -90,17 +95,19 @@ def get_movie_info(self, path):
c_count += 1
# 补0使电影名称对应的列表长度为15
v_tit = [movie_titles[k] for k in titles]
- while len(v_tit)<15:
+ while len(v_tit) < 15:
v_tit.append(0)
# 补0使电影种类对应的列表长度为6
v_cat = [movie_cat[k] for k in cats]
- while len(v_cat)<6:
+ while len(v_cat) < 6:
v_cat.append(0)
# 保存电影数据到movie_info中
- movie_info[v_id] = {'mov_id': int(v_id),
- 'title': v_tit,
- 'category': v_cat,
- 'years': int(v_year)}
+ movie_info[v_id] = {
+ 'mov_id': int(v_id),
+ 'title': v_tit,
+ 'category': v_cat,
+ 'years': int(v_year)
+ }
return movie_info, movie_cat, movie_titles
def get_usr_info(self, path):
@@ -121,14 +128,17 @@ def gender2num(gender):
item = item.strip().split("::")
usr_id = item[0]
# 将字符数据转成数字并保存在字典中
- use_info[usr_id] = {'usr_id': int(usr_id),
- 'gender': gender2num(item[1]),
- 'age': int(item[2]),
- 'job': int(item[3])}
+ use_info[usr_id] = {
+ 'usr_id': int(usr_id),
+ 'gender': gender2num(item[1]),
+ 'age': int(item[2]),
+ 'job': int(item[3])
+ }
self.max_usr_id = max(self.max_usr_id, int(usr_id))
self.max_usr_age = max(self.max_usr_age, int(item[2]))
self.max_usr_job = max(self.max_usr_job, int(item[3]))
return use_info
+
# 得到评分数据
def get_rating_info(self, path):
# 读取文件里的数据
@@ -138,23 +148,26 @@ def get_rating_info(self, path):
rating_info = {}
for item in data:
item = item.strip().split("::")
- usr_id,movie_id,score = item[0],item[1],item[2]
+ usr_id, movie_id, score = item[0], item[1], item[2]
if usr_id not in rating_info.keys():
- rating_info[usr_id] = {movie_id:float(score)}
+ rating_info[usr_id] = {movie_id: float(score)}
else:
rating_info[usr_id][movie_id] = float(score)
return rating_info
+
# 构建数据集
def get_dataset(self, usr_info, rating_info, movie_info):
trainset = []
for usr_id in rating_info.keys():
usr_ratings = rating_info[usr_id]
for movie_id in usr_ratings:
- trainset.append({'usr_info': usr_info[usr_id],
- 'mov_info': movie_info[movie_id],
- 'scores': usr_ratings[movie_id]})
+ trainset.append({
+ 'usr_info': usr_info[usr_id],
+ 'mov_info': movie_info[movie_id],
+ 'scores': usr_ratings[movie_id]
+ })
return trainset
-
+
def load_data(self, dataset=None, mode='train'):
use_poster = False
@@ -163,6 +176,7 @@ def load_data(self, dataset=None, mode='train'):
data_length = len(dataset)
index_list = list(range(data_length))
+
# 定义数据迭代加载器
def data_generator():
# 训练模式下,打乱训练数据
@@ -187,7 +201,8 @@ def data_generator():
if use_poster:
# Read the poster image only when poster features are used (skipping it speeds up data loading)
- poster = Image.open(self.poster_path+'mov_id{}.jpg'.format(str(mov_id[0])))
+ poster = Image.open(self.poster_path +
+ 'mov_id{}.jpg'.format(str(mov_id[0])))
poster = poster.resize([64, 64])
if len(poster.size) <= 2:
poster = poster.convert("RGB")
@@ -196,7 +211,7 @@ def data_generator():
score_list.append(int(dataset[i]['scores']))
# 如果读取的数据量达到当前的batch大小,就返回当前批次
- if len(usr_id_list)==BATCHSIZE:
+ if len(usr_id_list) == BATCHSIZE:
# 转换列表数据为数组形式,reshape到固定形状
usr_id_arr = np.array(usr_id_list)
usr_gender_arr = np.array(usr_gender_list)
@@ -204,15 +219,21 @@ def data_generator():
usr_job_arr = np.array(usr_job_list)
mov_id_arr = np.array(mov_id_list)
- mov_cat_arr = np.reshape(np.array(mov_cat_list), [BATCHSIZE, 6]).astype(np.int64)
- mov_tit_arr = np.reshape(np.array(mov_tit_list), [BATCHSIZE, 1, 15]).astype(np.int64)
+ mov_cat_arr = np.reshape(
+ np.array(mov_cat_list), [BATCHSIZE, 6]).astype(np.int64)
+ mov_tit_arr = np.reshape(
+ np.array(mov_tit_list),
+ [BATCHSIZE, 1, 15]).astype(np.int64)
if use_poster:
- mov_poster_arr = np.reshape(np.array(mov_poster_list)/127.5 - 1, [BATCHSIZE, 3, 64, 64]).astype(np.float32)
+ mov_poster_arr = np.reshape(
+ np.array(mov_poster_list) / 127.5 - 1,
+ [BATCHSIZE, 3, 64, 64]).astype(np.float32)
else:
mov_poster_arr = np.array([0.])
- scores_arr = np.reshape(np.array(score_list), [-1, 1]).astype(np.float32)
+ scores_arr = np.reshape(np.array(score_list),
+ [-1, 1]).astype(np.float32)
# Return the current batch of data
yield [usr_id_arr, usr_gender_arr, usr_age_arr, usr_job_arr], \
@@ -222,48 +243,63 @@ def data_generator():
usr_id_list, usr_gender_list, usr_age_list, usr_job_list = [], [], [], []
mov_id_list, mov_tit_list, mov_cat_list, score_list = [], [], [], []
mov_poster_list = []
+
return data_generator
-
+
class MovModel(paddle.nn.Layer):
- def __init__(self, use_poster, use_mov_title, use_mov_cat, use_age_job,fc_sizes):
+ def __init__(self, use_poster, use_mov_title, use_mov_cat, use_age_job,
+ fc_sizes):
super(MovModel, self).__init__()
-
+
# 将传入的name信息和bool型参数添加到模型类中
self.use_mov_poster = use_poster
self.use_mov_title = use_mov_title
self.use_usr_age_job = use_age_job
self.use_mov_cat = use_mov_cat
self.fc_sizes = fc_sizes
-
+
# 获取数据集的信息,并构建训练和验证集的数据迭代器
Dataset = MovieLen(self.use_mov_poster)
self.Dataset = Dataset
self.trainset = self.Dataset.train_dataset
self.valset = self.Dataset.valid_dataset
- self.train_loader = self.Dataset.load_data(dataset=self.trainset, mode='train')
- self.valid_loader = self.Dataset.load_data(dataset=self.valset, mode='valid')
-
+ self.train_loader = self.Dataset.load_data(
+ dataset=self.trainset, mode='train')
+ self.valid_loader = self.Dataset.load_data(
+ dataset=self.valset, mode='valid')
""" define network layer for embedding usr info """
# 对电影ID信息做映射,并紧接着一个Linear层
MOV_DICT_SIZE = Dataset.max_mov_id + 1
self.mov_emb = Embedding(num_embeddings=MOV_DICT_SIZE, embedding_dim=32)
self.mov_fc = Linear(32, 32)
-
+
# 对电影类别做映射
CATEGORY_DICT_SIZE = len(Dataset.movie_cat) + 1
- self.mov_cat_emb = Embedding(num_embeddings=CATEGORY_DICT_SIZE, embedding_dim=32)
+ self.mov_cat_emb = Embedding(
+ num_embeddings=CATEGORY_DICT_SIZE, embedding_dim=32)
self.mov_cat_fc = Linear(32, 32)
-
+
# 对电影名称做映射
MOV_TITLE_DICT_SIZE = len(Dataset.movie_title) + 1
- self.mov_title_emb = Embedding(num_embeddings=MOV_TITLE_DICT_SIZE, embedding_dim=32)
- self.mov_title_conv = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=(2,1), padding=0)
- self.mov_title_conv2 = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=1, padding=0)
+ self.mov_title_emb = Embedding(
+ num_embeddings=MOV_TITLE_DICT_SIZE, embedding_dim=32)
+ self.mov_title_conv = Conv2D(
+ in_channels=1,
+ out_channels=1,
+ kernel_size=(3, 1),
+ stride=(2, 1),
+ padding=0)
+ self.mov_title_conv2 = Conv2D(
+ in_channels=1,
+ out_channels=1,
+ kernel_size=(3, 1),
+ stride=1,
+ padding=0)
# 新建一个Linear层,用于整合电影特征
self.mov_concat_embed = Linear(in_features=96, out_features=200)
-
+
#电影特征和用户特征使用了不同的全连接层,不共享参数
movie_sizes = [200] + self.fc_sizes
acts = ["relu" for _ in range(len(self.fc_sizes))]
@@ -293,7 +329,7 @@ def get_mov_feat(self, mov_var):
mov_id = self.mov_fc(mov_id)
mov_id = F.relu(mov_id)
feats_collect.append(mov_id)
-
+
# 如果使用电影的种类数据,计算电影种类特征的映射
if self.use_mov_cat:
# 计算电影种类的特征映射,对多个种类的特征求和得到最终特征
@@ -307,13 +343,14 @@ def get_mov_feat(self, mov_var):
if self.use_mov_title:
# 计算电影名字的特征映射,对特征映射使用卷积计算最终的特征
mov_title = self.mov_title_emb(mov_title)
- mov_title = F.relu(self.mov_title_conv2(F.relu(self.mov_title_conv(mov_title))))
-
+ mov_title = F.relu(
+ self.mov_title_conv2(F.relu(self.mov_title_conv(mov_title))))
+
mov_title = paddle.sum(mov_title, axis=2, keepdim=False)
mov_title = F.relu(mov_title)
mov_title = paddle.reshape(mov_title, [batch_size, -1])
feats_collect.append(mov_title)
-
+
# 使用一个全连接层,整合所有电影特征,映射为一个200维的特征向量
mov_feat = paddle.concat(feats_collect, axis=1)
mov_features = F.tanh(self.mov_concat_embed(mov_feat))
@@ -321,9 +358,15 @@ def get_mov_feat(self, mov_var):
mov_features = n_layer(mov_features)
return mov_features
+
## 测试电影特征提取网络
-fc_sizes=[128, 64, 32]
-model = MovModel(use_poster=False, use_mov_title=True, use_mov_cat=True, use_age_job=True,fc_sizes=fc_sizes)
+fc_sizes = [128, 64, 32]
+model = MovModel(
+ use_poster=False,
+ use_mov_title=True,
+ use_mov_cat=True,
+ use_age_job=True,
+ fc_sizes=fc_sizes)
model.eval()
data_loader = model.train_loader
@@ -333,10 +376,10 @@ def get_mov_feat(self, mov_var):
usr, mov, score = data
# 只使用每个Batch的第一条数据
mov_v = [var[0:1] for var in mov]
-
+
_mov_v = [np.squeeze(var[0:1]) for var in mov]
print("输入的电影ID数据:{}\n类别数据:{} \n名称数据:{} ".format(*_mov_v))
mov_v = [paddle.to_tensor(var) for var in mov_v]
mov_feat = model.get_mov_feat(mov_v)
print("计算得到的电影特征维度是:", mov_feat.shape)
- break
\ No newline at end of file
+ break
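
The fc_sizes=[128, 64, 32] argument turns into a small Linear/ReLU tower on top of the 200-dim movie feature, built by pairing adjacent entries of [200] + fc_sizes. A hedged Paddle sketch of that construction (the real code appends layers to self._movie_layers; Sequential is used here only for brevity):

import paddle

fc_sizes = [128, 64, 32]
sizes = [200] + fc_sizes               # [200, 128, 64, 32]

layers = []
for in_dim, out_dim in zip(sizes[:-1], sizes[1:]):
    layers.append(paddle.nn.Linear(in_features=in_dim, out_features=out_dim))
    layers.append(paddle.nn.ReLU())
tower = paddle.nn.Sequential(*layers)

x = paddle.randn([2, 200])
print(tower(x).shape)                  # [2, 32]
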
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-2-id-embedding.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-2-id-embedding.py
old mode 100644
new mode 100755
index 38bc3cba9..2faf1db01
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-2-id-embedding.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-2-id-embedding.py
@@ -1,4 +1,3 @@
-
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-
import paddle
from paddle.nn import Linear, Embedding, Conv2D
import numpy as np
@@ -26,9 +23,7 @@
USR_ID_NUM = 6040 + 1
# 定义用户ID的embedding层和fc层
-usr_emb = Embedding(num_embeddings=USR_ID_NUM,
- embedding_dim=32,
- sparse=False)
+usr_emb = Embedding(num_embeddings=USR_ID_NUM, embedding_dim=32, sparse=False)
usr_fc = Linear(in_features=32, out_features=32)
usr_id_var = paddle.to_tensor(usr_id_data)
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-3-gender-embedding.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-3-gender-embedding.py
old mode 100644
new mode 100755
index 1425f924f..01c584a59
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-3-gender-embedding.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-3-gender-embedding.py
@@ -1,4 +1,3 @@
-
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,14 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-
import paddle
from paddle.nn import Linear, Embedding, Conv2D
import numpy as np
import paddle.nn.functional as F
-
# 自定义一个用户性别数据
usr_gender_data = np.array((0, 1)).reshape(-1).astype('int64')
print("输入的用户性别是:", usr_gender_data)
@@ -30,14 +26,15 @@
USR_ID_NUM = 2
# 对用户性别信息做映射,并紧接着一个FC层
USR_GENDER_DICT_SIZE = 2
-usr_gender_emb = Embedding(num_embeddings=USR_GENDER_DICT_SIZE,
- embedding_dim=16)
+usr_gender_emb = Embedding(
+ num_embeddings=USR_GENDER_DICT_SIZE, embedding_dim=16)
usr_gender_fc = Linear(in_features=16, out_features=16)
usr_gender_var = paddle.to_tensor(usr_gender_data)
usr_gender_feat = usr_gender_fc(usr_gender_emb(usr_gender_var))
usr_gender_feat = F.relu(usr_gender_feat)
-print("用户性别特征的数据特征是:", usr_gender_feat.numpy(), "\n其形状是:", usr_gender_feat.shape)
+print("用户性别特征的数据特征是:",
+ usr_gender_feat.numpy(), "\n其形状是:", usr_gender_feat.shape)
print("\n性别 0 对应的特征是:", usr_gender_feat.numpy()[0, :])
-print("性别 1 对应的特征是:", usr_gender_feat.numpy()[1, :])
\ No newline at end of file
+print("性别 1 对应的特征是:", usr_gender_feat.numpy()[1, :])
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-4-age-embedding.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-4-age-embedding.py
old mode 100644
new mode 100755
index 534764bf7..7ff882975
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-4-age-embedding.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-4-age-embedding.py
@@ -1,4 +1,3 @@
-
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,7 +17,6 @@
import numpy as np
import paddle.nn.functional as F
-
# 自定义一个用户年龄数据
usr_age_data = np.array((1, 18)).reshape(-1).astype('int64')
print("输入的用户年龄是:", usr_age_data)
@@ -27,8 +25,7 @@
# 年龄的最大ID是56,所以Embedding层size的第一个参数设置为56 + 1 = 57
USR_AGE_DICT_SIZE = 56 + 1
-usr_age_emb = Embedding(num_embeddings=USR_AGE_DICT_SIZE,
- embedding_dim=16)
+usr_age_emb = Embedding(num_embeddings=USR_AGE_DICT_SIZE, embedding_dim=16)
usr_age_fc = Linear(in_features=16, out_features=16)
usr_age = paddle.to_tensor(usr_age_data)
@@ -38,4 +35,4 @@
print("用户年龄特征的数据特征是:", usr_age_feat.numpy(), "\n其形状是:", usr_age_feat.shape)
print("\n年龄 1 对应的特征是:", usr_age_feat.numpy()[0, :])
-print("年龄 18 对应的特征是:", usr_age_feat.numpy()[1, :])
\ No newline at end of file
+print("年龄 18 对应的特征是:", usr_age_feat.numpy()[1, :])
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-5-job-embedding.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-5-job-embedding.py
old mode 100644
new mode 100755
index 73d8e6cf6..c4c8939ac
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-5-job-embedding.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-5-job-embedding.py
@@ -24,7 +24,7 @@
# 对用户职业信息做映射,并紧接着一个Linear层
# 用户职业的最大ID是20,所以Embedding层size的第一个参数设置为20 + 1 = 21
USR_JOB_DICT_SIZE = 20 + 1
-usr_job_emb = Embedding(num_embeddings=USR_JOB_DICT_SIZE,embedding_dim=16)
+usr_job_emb = Embedding(num_embeddings=USR_JOB_DICT_SIZE, embedding_dim=16)
usr_job_fc = Linear(in_features=16, out_features=16)
usr_job = paddle.to_tensor(usr_job_data)
@@ -34,4 +34,4 @@
print("用户年龄特征的数据特征是:", usr_job_feat.numpy(), "\n其形状是:", usr_job_feat.shape)
print("\n职业 0 对应的特征是:", usr_job_feat.numpy()[0, :])
-print("职业 20 对应的特征是:", usr_job_feat.numpy()[1, :])
\ No newline at end of file
+print("职业 20 对应的特征是:", usr_job_feat.numpy()[1, :])
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-6-user-feature-merge.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-6-user-feature-merge.py
old mode 100644
new mode 100755
index 204bb0b53..9e9daf6a5
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-6-user-feature-merge.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-6-user-feature-merge.py
@@ -26,9 +26,7 @@
usr_id_data = np.random.randint(0, 6040, (2)).reshape((-1)).astype('int64')
USR_ID_NUM = 6040 + 1
# 定义用户ID的embedding层和fc层
-usr_emb = Embedding(num_embeddings=USR_ID_NUM,
- embedding_dim=32,
- sparse=False)
+usr_emb = Embedding(num_embeddings=USR_ID_NUM, embedding_dim=32, sparse=False)
usr_fc = Linear(in_features=32, out_features=32)
usr_id_var = paddle.to_tensor(usr_id_data)
@@ -41,8 +39,7 @@
# 年龄的最大ID是56,所以Embedding层size的第一个参数设置为56 + 1 = 57
USR_AGE_DICT_SIZE = 56 + 1
-usr_age_emb = Embedding(num_embeddings=USR_AGE_DICT_SIZE,
- embedding_dim=16)
+usr_age_emb = Embedding(num_embeddings=USR_AGE_DICT_SIZE, embedding_dim=16)
usr_age_fc = Linear(in_features=16, out_features=16)
usr_age = paddle.to_tensor(usr_age_data)
@@ -56,8 +53,8 @@
USR_ID_NUM = 2
# 对用户性别信息做映射,并紧接着一个FC层
USR_GENDER_DICT_SIZE = 2
-usr_gender_emb = Embedding(num_embeddings=USR_GENDER_DICT_SIZE,
- embedding_dim=16)
+usr_gender_emb = Embedding(
+ num_embeddings=USR_GENDER_DICT_SIZE, embedding_dim=16)
usr_gender_fc = Linear(in_features=16, out_features=16)
@@ -65,12 +62,11 @@
usr_gender_feat = usr_gender_fc(usr_gender_emb(usr_gender_var))
usr_gender_feat = F.relu(usr_gender_feat)
-
# 自定义一个用户职业数据
usr_job_data = np.array((0, 20)).reshape(-1).astype('int64')
# 用户职业的最大ID是20,所以Embedding层size的第一个参数设置为20 + 1 = 21
USR_JOB_DICT_SIZE = 20 + 1
-usr_job_emb = Embedding(num_embeddings=USR_JOB_DICT_SIZE,embedding_dim=16)
+usr_job_emb = Embedding(num_embeddings=USR_JOB_DICT_SIZE, embedding_dim=16)
usr_job_fc = Linear(in_features=16, out_features=16)
usr_job = paddle.to_tensor(usr_job_data)
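
Merging then concatenates the four branch outputs: 32 (ID) + 16 (gender) + 16 (age) + 16 (job) = 80 features, which is why the combining layer downstream is Linear(in_features=80, out_features=200). A numpy sketch of the dimension check (random stand-in features):

import numpy as np

batch = 2
usr_id_feat = np.random.randn(batch, 32).astype('float32')
usr_gender_feat = np.random.randn(batch, 16).astype('float32')
usr_age_feat = np.random.randn(batch, 16).astype('float32')
usr_job_feat = np.random.randn(batch, 16).astype('float32')

usr_feat = np.concatenate(
    [usr_id_feat, usr_gender_feat, usr_age_feat, usr_job_feat], axis=1)
print(usr_feat.shape)   # (2, 80) -> matches Linear(in_features=80, out_features=200)
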
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-7-movie-id-embedding.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-7-movie-id-embedding.py
old mode 100644
new mode 100755
index 8d0066a49..ab82baccf
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-7-movie-id-embedding.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-7-movie-id-embedding.py
@@ -1,4 +1,3 @@
-
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -25,11 +24,12 @@
mov_emb = Embedding(num_embeddings=MOV_DICT_SIZE, embedding_dim=32)
mov_fc = Linear(32, 32)
-
print("输入的电影ID是:", mov_id_data)
mov_id_data = paddle.to_tensor(mov_id_data)
mov_id_feat = mov_fc(mov_emb(mov_id_data))
mov_id_feat = F.relu(mov_id_feat)
print("计算的电影ID的特征是", mov_id_feat.numpy(), "\n其形状是:", mov_id_feat.shape)
-print("\n电影ID为 {} 计算得到的特征是:{}".format(mov_id_data.numpy()[0], mov_id_feat.numpy()[0]))
-print("电影ID为 {} 计算得到的特征是:{}".format(mov_id_data.numpy()[1], mov_id_feat.numpy()[1]))
\ No newline at end of file
+print("\n电影ID为 {} 计算得到的特征是:{}".format(mov_id_data.numpy()[0],
+ mov_id_feat.numpy()[0]))
+print("电影ID为 {} 计算得到的特征是:{}".format(mov_id_data.numpy()[1],
+ mov_id_feat.numpy()[1]))
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-8-movie-category-embedding.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-8-movie-category-embedding.py
old mode 100644
new mode 100755
index 94717f44c..70414f4f6
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-8-movie-category-embedding.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-8-movie-category-embedding.py
@@ -1,4 +1,3 @@
-
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,7 +18,8 @@
import paddle.nn.functional as F
# 自定义一个电影类别数据
-mov_cat_data = np.array(((1, 2, 3, 0, 0, 0), (2, 3, 4, 0, 0, 0))).reshape(2, -1).astype('int64')
+mov_cat_data = np.array(((1, 2, 3, 0, 0, 0),
+ (2, 3, 4, 0, 0, 0))).reshape(2, -1).astype('int64')
# Map the movie category IDs, followed by a Linear layer
MOV_DICT_SIZE = 6 + 1
mov_emb = Embedding(num_embeddings=MOV_DICT_SIZE, embedding_dim=32)
@@ -36,5 +36,7 @@
mov_cat_feat = mov_fc(mov_cat_feat)
mov_cat_feat = F.relu(mov_cat_feat)
print("计算的电影类别的特征是", mov_cat_feat.numpy(), "\n其形状是:", mov_cat_feat.shape)
-print("\n电影类别为 {} 计算得到的特征是:{}".format(mov_cat_data.numpy()[0, :], mov_cat_feat.numpy()[0]))
-print("\n电影类别为 {} 计算得到的特征是:{}".format(mov_cat_data.numpy()[1, :], mov_cat_feat.numpy()[1]))
\ No newline at end of file
+print("\n电影类别为 {} 计算得到的特征是:{}".format(mov_cat_data.numpy()[0, :],
+ mov_cat_feat.numpy()[0]))
+print("\n电影类别为 {} 计算得到的特征是:{}".format(mov_cat_data.numpy()[1, :],
+ mov_cat_feat.numpy()[1]))
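
Each movie carries up to six category IDs padded with zeros, so the category branch embeds all six slots and sums over the slot axis to obtain one fixed-size vector per movie (padding contributes the learned row for ID 0, which training must down-weight). A numpy sketch of the reduction:

import numpy as np

# embedded categories: batch of 2 movies x 6 category slots x 32-dim vectors
cat_emb = np.random.randn(2, 6, 32).astype('float32')

# summing over the slot axis collapses a variable number of genres
# (plus padding) into one 32-dim vector per movie
cat_feat = cat_emb.sum(axis=1)
print(cat_feat.shape)   # (2, 32)
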
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-9-movie-title-embedding.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-9-movie-title-embedding.py
old mode 100644
new mode 100755
index 8c89032aa..addc1a82c
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-3-9-movie-title-embedding.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-3-9-movie-title-embedding.py
@@ -18,14 +18,21 @@
import paddle.nn.functional as F
# 自定义两个电影名称数据
-mov_title_data = np.array(((1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
- (2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))).reshape(2, 1, 15).astype('int64')
+mov_title_data = np.array(((1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ (2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0))).reshape(2, 1, 15).astype('int64')
# 对电影名称做映射,紧接着FC和pool层
MOV_TITLE_DICT_SIZE = 1000 + 1
mov_title_emb = Embedding(num_embeddings=MOV_TITLE_DICT_SIZE, embedding_dim=32)
-mov_title_conv = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=(2, 1), padding=0)
+mov_title_conv = Conv2D(
+ in_channels=1,
+ out_channels=1,
+ kernel_size=(3, 1),
+ stride=(2, 1),
+ padding=0)
# Use a 3 x 1 convolution in place of a fully connected layer
-mov_title_conv2 = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=1, padding=0)
+mov_title_conv2 = Conv2D(
+ in_channels=1, out_channels=1, kernel_size=(3, 1), stride=1, padding=0)
mov_title_data = paddle.to_tensor(mov_title_data)
print("电影名称数据的输入形状: ", mov_title_data.shape)
@@ -48,5 +55,7 @@
print("电影名称特征的最终特征输出形状:", mov_title_feat.shape)
print("\n计算的电影名称的特征是", mov_title_feat.numpy(), "\n其形状是:", mov_title_feat.shape)
-print("\n电影名称为 {} 计算得到的特征是:{}".format(mov_title_data.numpy()[0,:, 0], mov_title_feat.numpy()[0]))
-print("\n电影名称为 {} 计算得到的特征是:{}".format(mov_title_data.numpy()[1,:, 0], mov_title_feat.numpy()[1]))
\ No newline at end of file
+print("\n电影名称为 {} 计算得到的特征是:{}".format(mov_title_data.numpy()[0, :, 0],
+ mov_title_feat.numpy()[0]))
+print("\n电影名称为 {} 计算得到的特征是:{}".format(mov_title_data.numpy()[1, :, 0],
+ mov_title_feat.numpy()[1]))
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-4-1-training-evaluation.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-4-1-training-evaluation.py
old mode 100644
new mode 100755
index b187ead6d..d55c76dde
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-4-1-training-evaluation.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-4-1-training-evaluation.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import paddle
from paddle.nn import Linear, Embedding, Conv2D
import numpy as np
@@ -25,6 +24,7 @@
from PIL import Image
import math
+
class MovieLen(object):
def __init__(self, use_poster):
self.use_poster = use_poster
@@ -38,10 +38,12 @@ def __init__(self, use_poster):
movie_info_path = "./work/ml-1m/movies.dat"
self.poster_path = "./work/ml-1m/posters/"
# 得到电影数据
- self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(movie_info_path)
+ self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(
+ movie_info_path)
# 记录电影的最大ID
self.max_mov_cat = np.max([self.movie_cat[k] for k in self.movie_cat])
- self.max_mov_tit = np.max([self.movie_title[k] for k in self.movie_title])
+ self.max_mov_tit = np.max(
+ [self.movie_title[k] for k in self.movie_title])
self.max_mov_id = np.max(list(map(int, self.movie_info.keys())))
# 记录用户数据的最大ID
self.max_usr_id = 0
@@ -52,15 +54,18 @@ def __init__(self, use_poster):
# 得到评分数据
self.rating_info = self.get_rating_info(rating_path)
# 构建数据集
- self.dataset = self.get_dataset(usr_info=self.usr_info,
- rating_info=self.rating_info,
- movie_info=self.movie_info)
+ self.dataset = self.get_dataset(
+ usr_info=self.usr_info,
+ rating_info=self.rating_info,
+ movie_info=self.movie_info)
# 划分数据集,获得数据加载器
- self.train_dataset = self.dataset[:int(len(self.dataset)*0.9)]
- self.valid_dataset = self.dataset[int(len(self.dataset)*0.9):]
+ self.train_dataset = self.dataset[:int(len(self.dataset) * 0.9)]
+ self.valid_dataset = self.dataset[int(len(self.dataset) * 0.9):]
print("##Total dataset instances: ", len(self.dataset))
print("##MovieLens dataset information: \nusr num: {}\n"
- "movies num: {}".format(len(self.usr_info),len(self.movie_info)))
+ "movies num: {}".format(
+ len(self.usr_info), len(self.movie_info)))
+
# 得到电影数据
def get_movie_info(self, path):
# 打开文件,编码方式选择ISO-8859-1,读取所有数据到data中
@@ -92,17 +97,19 @@ def get_movie_info(self, path):
c_count += 1
# 补0使电影名称对应的列表长度为15
v_tit = [movie_titles[k] for k in titles]
- while len(v_tit)<15:
+ while len(v_tit) < 15:
v_tit.append(0)
# 补0使电影种类对应的列表长度为6
v_cat = [movie_cat[k] for k in cats]
- while len(v_cat)<6:
+ while len(v_cat) < 6:
v_cat.append(0)
# 保存电影数据到movie_info中
- movie_info[v_id] = {'mov_id': int(v_id),
- 'title': v_tit,
- 'category': v_cat,
- 'years': int(v_year)}
+ movie_info[v_id] = {
+ 'mov_id': int(v_id),
+ 'title': v_tit,
+ 'category': v_cat,
+ 'years': int(v_year)
+ }
return movie_info, movie_cat, movie_titles
def get_usr_info(self, path):
@@ -123,14 +130,17 @@ def gender2num(gender):
item = item.strip().split("::")
usr_id = item[0]
# 将字符数据转成数字并保存在字典中
- use_info[usr_id] = {'usr_id': int(usr_id),
- 'gender': gender2num(item[1]),
- 'age': int(item[2]),
- 'job': int(item[3])}
+ use_info[usr_id] = {
+ 'usr_id': int(usr_id),
+ 'gender': gender2num(item[1]),
+ 'age': int(item[2]),
+ 'job': int(item[3])
+ }
self.max_usr_id = max(self.max_usr_id, int(usr_id))
self.max_usr_age = max(self.max_usr_age, int(item[2]))
self.max_usr_job = max(self.max_usr_job, int(item[3]))
return use_info
+
# 得到评分数据
def get_rating_info(self, path):
# 读取文件里的数据
@@ -140,23 +150,26 @@ def get_rating_info(self, path):
rating_info = {}
for item in data:
item = item.strip().split("::")
- usr_id,movie_id,score = item[0],item[1],item[2]
+ usr_id, movie_id, score = item[0], item[1], item[2]
if usr_id not in rating_info.keys():
- rating_info[usr_id] = {movie_id:float(score)}
+ rating_info[usr_id] = {movie_id: float(score)}
else:
rating_info[usr_id][movie_id] = float(score)
return rating_info
+
# 构建数据集
def get_dataset(self, usr_info, rating_info, movie_info):
trainset = []
for usr_id in rating_info.keys():
usr_ratings = rating_info[usr_id]
for movie_id in usr_ratings:
- trainset.append({'usr_info': usr_info[usr_id],
- 'mov_info': movie_info[movie_id],
- 'scores': usr_ratings[movie_id]})
+ trainset.append({
+ 'usr_info': usr_info[usr_id],
+ 'mov_info': movie_info[movie_id],
+ 'scores': usr_ratings[movie_id]
+ })
return trainset
-
+
def load_data(self, dataset=None, mode='train'):
use_poster = False
@@ -165,6 +178,7 @@ def load_data(self, dataset=None, mode='train'):
data_length = len(dataset)
index_list = list(range(data_length))
+
# 定义数据迭代加载器
def data_generator():
# 训练模式下,打乱训练数据
@@ -189,7 +203,8 @@ def data_generator():
if use_poster:
# Read the poster image only when poster features are used (skipping it speeds up data loading)
- poster = Image.open(self.poster_path+'mov_id{}.jpg'.format(str(mov_id[0])))
+ poster = Image.open(self.poster_path +
+ 'mov_id{}.jpg'.format(str(mov_id[0])))
poster = poster.resize([64, 64])
if len(poster.size) <= 2:
poster = poster.convert("RGB")
@@ -198,7 +213,7 @@ def data_generator():
score_list.append(int(dataset[i]['scores']))
# 如果读取的数据量达到当前的batch大小,就返回当前批次
- if len(usr_id_list)==BATCHSIZE:
+ if len(usr_id_list) == BATCHSIZE:
# 转换列表数据为数组形式,reshape到固定形状
usr_id_arr = np.array(usr_id_list)
usr_gender_arr = np.array(usr_gender_list)
@@ -206,15 +221,22 @@ def data_generator():
usr_job_arr = np.array(usr_job_list)
mov_id_arr = np.array(mov_id_list)
- mov_cat_arr = np.reshape(np.array(mov_cat_list), [BATCHSIZE, 6]).astype(np.int64)
- mov_tit_arr = np.reshape(np.array(mov_tit_list), [BATCHSIZE, 1, 15]).astype(np.int64)
+ mov_cat_arr = np.reshape(
+ np.array(mov_cat_list),
+ [BATCHSIZE, 6]).astype(np.int64)
+ mov_tit_arr = np.reshape(
+ np.array(mov_tit_list),
+ [BATCHSIZE, 1, 15]).astype(np.int64)
if use_poster:
- mov_poster_arr = np.reshape(np.array(mov_poster_list)/127.5 - 1, [BATCHSIZE, 3, 64, 64]).astype(np.float32)
+ mov_poster_arr = np.reshape(
+ np.array(mov_poster_list) / 127.5 - 1,
+ [BATCHSIZE, 3, 64, 64]).astype(np.float32)
else:
mov_poster_arr = np.array([0.])
- scores_arr = np.reshape(np.array(score_list), [-1, 1]).astype(np.float32)
+ scores_arr = np.reshape(np.array(score_list),
+ [-1, 1]).astype(np.float32)
# Return the current batch of data
yield [usr_id_arr, usr_gender_arr, usr_age_arr, usr_job_arr], \
@@ -224,78 +246,109 @@ def data_generator():
usr_id_list, usr_gender_list, usr_age_list, usr_job_list = [], [], [], []
mov_id_list, mov_tit_list, mov_cat_list, score_list = [], [], [], []
mov_poster_list = []
+
return data_generator
+
class Model(paddle.nn.Layer):
- def __init__(self, use_poster, use_mov_title, use_mov_cat, use_age_job,fc_sizes):
+ def __init__(self, use_poster, use_mov_title, use_mov_cat, use_age_job,
+ fc_sizes):
super(Model, self).__init__()
-
+
# 将传入的name信息和bool型参数添加到模型类中
self.use_mov_poster = use_poster
self.use_mov_title = use_mov_title
self.use_usr_age_job = use_age_job
self.use_mov_cat = use_mov_cat
- self.fc_sizes=fc_sizes
-
+ self.fc_sizes = fc_sizes
+
# 获取数据集的信息,并构建训练和验证集的数据迭代器
Dataset = MovieLen(self.use_mov_poster)
self.Dataset = Dataset
self.trainset = self.Dataset.train_dataset
self.valset = self.Dataset.valid_dataset
- self.train_loader = self.Dataset.load_data(dataset=self.trainset, mode='train')
- self.valid_loader = self.Dataset.load_data(dataset=self.valset, mode='valid')
-
- usr_embedding_dim=32
- gender_embeding_dim=16
- age_embedding_dim=16
- job_embedding_dim=16
-
- mov_embedding_dim=16
- category_embedding_dim=16
- title_embedding_dim=32
-
+ self.train_loader = self.Dataset.load_data(
+ dataset=self.trainset, mode='train')
+ self.valid_loader = self.Dataset.load_data(
+ dataset=self.valset, mode='valid')
+
+ usr_embedding_dim = 32
+ gender_embedding_dim = 16
+ age_embedding_dim = 16
+ job_embedding_dim = 16
+
+ mov_embedding_dim = 16
+ category_embedding_dim = 16
+ title_embedding_dim = 32
""" define network layer for embedding usr info """
USR_ID_NUM = Dataset.max_usr_id + 1
-
+
# 对用户ID做映射,并紧接着一个Linear层
- self.usr_emb = Embedding(num_embeddings=USR_ID_NUM, embedding_dim=usr_embedding_dim, sparse=False)
+ self.usr_emb = Embedding(
+ num_embeddings=USR_ID_NUM,
+ embedding_dim=usr_embedding_dim,
+ sparse=False)
self.usr_fc = Linear(in_features=usr_embedding_dim, out_features=32)
-
+
# 对用户性别信息做映射,并紧接着一个Linear层
USR_GENDER_DICT_SIZE = 2
- self.usr_gender_emb = Embedding(num_embeddings=USR_GENDER_DICT_SIZE, embedding_dim=gender_embeding_dim)
- self.usr_gender_fc = Linear(in_features=gender_embeding_dim, out_features=16)
-
+ self.usr_gender_emb = Embedding(
+ num_embeddings=USR_GENDER_DICT_SIZE,
+ embedding_dim=gender_embedding_dim)
+ self.usr_gender_fc = Linear(
+ in_features=gender_embedding_dim, out_features=16)
+
# 对用户年龄信息做映射,并紧接着一个Linear层
USR_AGE_DICT_SIZE = Dataset.max_usr_age + 1
- self.usr_age_emb = Embedding(num_embeddings=USR_AGE_DICT_SIZE, embedding_dim=age_embedding_dim)
- self.usr_age_fc = Linear(in_features=age_embedding_dim, out_features=16)
-
+ self.usr_age_emb = Embedding(
+ num_embeddings=USR_AGE_DICT_SIZE, embedding_dim=age_embedding_dim)
+ self.usr_age_fc = Linear(
+ in_features=age_embedding_dim, out_features=16)
+
# 对用户职业信息做映射,并紧接着一个Linear层
USR_JOB_DICT_SIZE = Dataset.max_usr_job + 1
- self.usr_job_emb = Embedding(num_embeddings=USR_JOB_DICT_SIZE, embedding_dim=job_embedding_dim)
- self.usr_job_fc = Linear(in_features=job_embedding_dim, out_features=16)
-
+ self.usr_job_emb = Embedding(
+ num_embeddings=USR_JOB_DICT_SIZE, embedding_dim=job_embedding_dim)
+ self.usr_job_fc = Linear(
+ in_features=job_embedding_dim, out_features=16)
+
# 新建一个Linear层,用于整合用户数据信息
self.usr_combined = Linear(in_features=80, out_features=200)
-
""" define network layer for embedding usr info """
# 对电影ID信息做映射,并紧接着一个Linear层
MOV_DICT_SIZE = Dataset.max_mov_id + 1
- self.mov_emb = Embedding(num_embeddings=MOV_DICT_SIZE, embedding_dim=mov_embedding_dim)
+ self.mov_emb = Embedding(
+ num_embeddings=MOV_DICT_SIZE, embedding_dim=mov_embedding_dim)
self.mov_fc = Linear(in_features=mov_embedding_dim, out_features=32)
-
+
# 对电影类别做映射
CATEGORY_DICT_SIZE = len(Dataset.movie_cat) + 1
- self.mov_cat_emb = Embedding(num_embeddings=CATEGORY_DICT_SIZE, embedding_dim=category_embedding_dim, sparse=False)
- self.mov_cat_fc = Linear(in_features=category_embedding_dim, out_features=32)
-
+ self.mov_cat_emb = Embedding(
+ num_embeddings=CATEGORY_DICT_SIZE,
+ embedding_dim=category_embedding_dim,
+ sparse=False)
+ self.mov_cat_fc = Linear(
+ in_features=category_embedding_dim, out_features=32)
+
# 对电影名称做映射
MOV_TITLE_DICT_SIZE = len(Dataset.movie_title) + 1
- self.mov_title_emb = Embedding(num_embeddings=MOV_TITLE_DICT_SIZE, embedding_dim=title_embedding_dim, sparse=False)
- self.mov_title_conv = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=(2,1), padding=0)
- self.mov_title_conv2 = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=1, padding=0)
-
+ self.mov_title_emb = Embedding(
+ num_embeddings=MOV_TITLE_DICT_SIZE,
+ embedding_dim=title_embedding_dim,
+ sparse=False)
+ self.mov_title_conv = Conv2D(
+ in_channels=1,
+ out_channels=1,
+ kernel_size=(3, 1),
+ stride=(2, 1),
+ padding=0)
+ self.mov_title_conv2 = Conv2D(
+ in_channels=1,
+ out_channels=1,
+ kernel_size=(3, 1),
+ stride=1,
+ padding=0)
+
# 新建一个Linear层,用于整合电影特征
self.mov_concat_embed = Linear(in_features=96, out_features=200)
@@ -329,7 +382,7 @@ def __init__(self, use_poster, use_mov_title, use_mov_cat, use_age_job,fc_sizes)
if acts[i] == 'relu':
act = paddle.nn.ReLU()
self._movie_layers.append(act)
-
+
# 定义计算用户特征的前向运算过程
def get_usr_feat(self, usr_var):
""" get usr features"""
@@ -341,7 +394,7 @@ def get_usr_feat(self, usr_var):
usr_id = self.usr_fc(usr_id)
usr_id = F.relu(usr_id)
feats_collect.append(usr_id)
-
+
# 计算用户的性别特征,并保存在feats_collect中
usr_gender = self.usr_gender_emb(usr_gender)
usr_gender = self.usr_gender_fc(usr_gender)
@@ -359,7 +412,7 @@ def get_usr_feat(self, usr_var):
usr_job = self.usr_job_fc(usr_job)
usr_job = F.relu(usr_job)
feats_collect.append(usr_job)
-
+
# 将用户的特征级联,并通过Linear层得到最终的用户特征
usr_feat = paddle.concat(feats_collect, axis=1)
user_features = F.tanh(self.usr_combined(usr_feat))
@@ -383,7 +436,7 @@ def get_mov_feat(self, mov_var):
mov_id = self.mov_fc(mov_id)
mov_id = F.relu(mov_id)
feats_collect.append(mov_id)
-
+
# 如果使用电影的种类数据,计算电影种类特征的映射
if self.use_mov_cat:
# 计算电影种类的特征映射,对多个种类的特征求和得到最终特征
@@ -396,31 +449,32 @@ def get_mov_feat(self, mov_var):
if self.use_mov_title:
# 计算电影名字的特征映射,对特征映射使用卷积计算最终的特征
mov_title = self.mov_title_emb(mov_title)
- mov_title = F.relu(self.mov_title_conv2(F.relu(self.mov_title_conv(mov_title))))
+ mov_title = F.relu(
+ self.mov_title_conv2(F.relu(self.mov_title_conv(mov_title))))
mov_title = paddle.sum(mov_title, axis=2, keepdim=False)
mov_title = F.relu(mov_title)
mov_title = paddle.reshape(mov_title, [batch_size, -1])
-
+
feats_collect.append(mov_title)
-
+
# 使用一个全连接层,整合所有电影特征,映射为一个200维的特征向量
mov_feat = paddle.concat(feats_collect, axis=1)
mov_features = F.tanh(self.mov_concat_embed(mov_feat))
for n_layer in self._movie_layers:
mov_features = n_layer(mov_features)
-
+
return mov_features
-
+
# 定义个性化推荐算法的前向计算
def forward(self, usr_var, mov_var):
# 计算用户特征和电影特征
user_features = self.get_usr_feat(usr_var)
mov_features = self.get_mov_feat(mov_var)
-
-
+
#使用余弦相似度算子,计算用户和电影的相似程度
- sim = F.cosine_similarity(user_features, mov_features, axis=1).reshape([-1, 1])
+ sim = F.cosine_similarity(
+ user_features, mov_features, axis=1).reshape([-1, 1])
# 将相似度扩大范围到和电影评分相同数据范围
res = paddle.scale(sim, scale=5)
return user_features, mov_features, res
@@ -430,15 +484,16 @@ def train(model):
# 配置训练参数
lr = 0.001
epochs = 10
- paddle.set_device('cpu')
+ paddle.set_device('cpu')
# 启动训练
model.train()
# 获得数据读取器
data_loader = model.train_loader
# Use the Adam optimizer with the learning rate lr = 0.001 set above
- opt = paddle.optimizer.Adam(learning_rate=lr, parameters=model.parameters())
-
+ opt = paddle.optimizer.Adam(
+ learning_rate=lr, parameters=model.parameters())
+
for epoch in range(0, epochs):
for idx, data in enumerate(data_loader()):
# 获得数据,并转为tensor格式
@@ -453,17 +508,22 @@ def train(model):
avg_loss = paddle.mean(loss)
if idx % 500 == 0:
- print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, idx, avg_loss.numpy()))
-
+ print("epoch: {}, batch_id: {}, loss is: {}".format(
+ epoch, idx, avg_loss.numpy()))
+
# Backpropagate the loss, take an optimizer step, then clear the gradients
avg_loss.backward()
opt.step()
opt.clear_grad()
# 每个epoch 保存一次模型
- paddle.save(model.state_dict(), './checkpoint/epoch'+str(epoch)+'.pdparams')
+ paddle.save(model.state_dict(),
+ './checkpoint/epoch' + str(epoch) + '.pdparams')
+
from math import sqrt
+
+
def evaluation(model, params_file_path):
model_state_dict = paddle.load(params_file_path)
model.load_dict(model_state_dict)
@@ -471,7 +531,7 @@ def evaluation(model, params_file_path):
acc_set = []
avg_loss_set = []
- squaredError=[]
+ squaredError = []
for idx, data in enumerate(model.valid_loader()):
usr, mov, score_label = data
usr_v = [paddle.to_tensor(var) for var in usr]
@@ -480,36 +540,37 @@ def evaluation(model, params_file_path):
_, _, scores_predict = model(usr_v, mov_v)
pred_scores = scores_predict.numpy()
-
+
avg_loss_set.append(np.mean(np.abs(pred_scores - score_label)))
squaredError.extend(np.abs(pred_scores - score_label)**2)
diff = np.abs(pred_scores - score_label)
- diff[diff>0.5] = 1
+ diff[diff > 0.5] = 1
acc = 1 - np.mean(diff)
acc_set.append(acc)
- RMSE=sqrt(np.sum(squaredError) / len(squaredError))
+ RMSE = sqrt(np.sum(squaredError) / len(squaredError))
# print("RMSE = ", sqrt(np.sum(squaredError) / len(squaredError)))#均方根误差RMSE
- return np.mean(acc_set), np.mean(avg_loss_set),RMSE
+ return np.mean(acc_set), np.mean(avg_loss_set), RMSE
+
# 启动训练
-fc_sizes=[128, 64, 32]
+fc_sizes = [128, 64, 32]
use_poster, use_mov_title, use_mov_cat, use_age_job = False, True, True, True
-model = Model(use_poster, use_mov_title, use_mov_cat, use_age_job,fc_sizes)
+model = Model(use_poster, use_mov_title, use_mov_cat, use_age_job, fc_sizes)
train(model)
param_path = "./checkpoint/epoch"
for i in range(10):
- acc, mae,RMSE = evaluation(model, param_path+str(i)+'.pdparams')
- print("ACC:", acc, "MAE:", mae,'RMSE:',RMSE)
+ acc, mae, RMSE = evaluation(model, param_path + str(i) + '.pdparams')
+ print("ACC:", acc, "MAE:", mae, 'RMSE:', RMSE)
# 定义特征保存函数
def get_usr_mov_features(model, params_file_path, poster_path):
- paddle.set_device('cpu')
+ paddle.set_device('cpu')
usr_pkl = {}
mov_pkl = {}
-
+
# 定义将list中每个元素转成tensor的函数
def list2tensor(inputs, shape):
inputs = np.reshape(np.array(inputs).astype(np.int64), shape)
@@ -525,7 +586,8 @@ def list2tensor(inputs, shape):
for i in range(len(dataset)):
# 获得用户数据,电影数据,评分数据
# 本案例只转换所有在样本中出现过的user和movie,实际中可以使用业务系统中的全量数据
- usr_info, mov_info, score = dataset[i]['usr_info'], dataset[i]['mov_info'],dataset[i]['scores']
+ usr_info, mov_info, score = dataset[i]['usr_info'], dataset[i][
+ 'mov_info'], dataset[i]['scores']
usrid = str(usr_info['usr_id'])
movid = str(mov_info['mov_id'])
@@ -540,7 +602,7 @@ def list2tensor(inputs, shape):
usr_feat = model.get_usr_feat(usr_in)
usr_pkl[usrid] = usr_feat.numpy()
-
+
# 获得电影数据,计算得到电影特征,保存在mov_pkl字典中
if movid not in mov_pkl.keys():
mov_id_v = list2tensor(mov_info['mov_id'], [1])
@@ -551,8 +613,6 @@ def list2tensor(inputs, shape):
mov_feat = model.get_mov_feat(mov_in)
mov_pkl[movid] = mov_feat.numpy()
-
-
print(len(mov_pkl.keys()))
# 保存特征到本地
@@ -563,4 +623,4 @@ def list2tensor(inputs, shape):
param_path = "./checkpoint/epoch9.pdparams"
poster_path = "./work/ml-1m/posters/"
-get_usr_mov_features(model, param_path, poster_path)
\ No newline at end of file
+get_usr_mov_features(model, param_path, poster_path)
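
evaluation() reports three numbers per checkpoint: MAE of the predicted scores, an accuracy in which any error above 0.5 counts as fully wrong (smaller errors are penalized proportionally), and RMSE over all predictions. The same metrics in a numpy-only sketch:

import numpy as np
from math import sqrt

pred = np.array([3.2, 4.9, 1.0, 3.6], dtype=np.float32)
label = np.array([3.0, 5.0, 2.0, 4.0], dtype=np.float32)

abs_err = np.abs(pred - label)
mae = abs_err.mean()

# errors above 0.5 are clipped to 1 (fully wrong), as in evaluation()
diff = abs_err.copy()
diff[diff > 0.5] = 1
acc = 1 - diff.mean()

rmse = sqrt(np.mean(abs_err ** 2))
print("ACC:", acc, "MAE:", mae, "RMSE:", rmse)
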
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-5-1-read-feature.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-5-1-read-feature.py
old mode 100644
new mode 100755
index f71efda83..0dfe2c977
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-5-1-read-feature.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-5-1-read-feature.py
@@ -12,10 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
# unzip -o data/save_feature_v1.zip -d /home/aistudio/
-import pickle
+import pickle
import numpy as np
mov_feat_dir = 'mov_feat.pkl'
@@ -51,11 +50,10 @@
usr_info[str(item[0])] = item
print("当前的用户是:")
-print("usr_id:", usr_id, usr_info[str(usr_id)])
+print("usr_id:", usr_id, usr_info[str(usr_id)])
print("对应的特征是:", usr_feats[str(usr_id)])
print("\n当前电影是:")
print("mov_id:", mov_id, mov_info[str(mov_id)])
print("对应的特征是:")
print(mov_feat)
-
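
The .pkl feature files read here are plain pickled dicts mapping string IDs to numpy feature arrays. A minimal round-trip sketch (the file name is hypothetical):

import pickle
import numpy as np

feats = {'1': np.random.randn(1, 200).astype('float32')}

# save and reload the feature dict exactly as the scripts above expect
with open('demo_feat.pkl', 'wb') as f:
    pickle.dump(feats, f)
with open('demo_feat.pkl', 'rb') as f:
    loaded = pickle.load(f)
print(loaded['1'].shape)   # (1, 200)
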
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-5-2-calculate-similarity.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-5-2-calculate-similarity.py
old mode 100644
new mode 100755
index 659596128..e7271b0e6
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-5-2-calculate-similarity.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-5-2-calculate-similarity.py
@@ -12,10 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
# unzip -o data/save_feature_v1.zip -d /home/aistudio/
import paddle
-import pickle
+import pickle
import numpy as np
usr_file = "./ml-1m/users.dat"
@@ -60,12 +59,13 @@
mov_feat = mov_feats[key]
usr_feat = paddle.to_tensor(usr_ID_feat)
mov_feat = paddle.to_tensor(mov_feat)
-
+
# 计算余弦相似度
sim = paddle.nn.functional.cosine_similarity(usr_feat, mov_feat)
# 打印特征和相似度的形状
- if idx==0:
- print("电影特征形状:{}, 用户特征形状:{}, 相似度结果形状:{},相似度结果:{}".format(mov_feat.shape, usr_feat.shape, sim.numpy().shape, sim.numpy()))
+ if idx == 0:
+ print("电影特征形状:{}, 用户特征形状:{}, 相似度结果形状:{},相似度结果:{}".format(
+ mov_feat.shape, usr_feat.shape, sim.numpy().shape, sim.numpy()))
# 从形状为(1,1)的相似度sim中获得相似度值sim.numpy()[0],并添加到相似度列表cos_sims中
cos_sims.append(sim.numpy()[0])
@@ -73,21 +73,18 @@
index = np.argsort(cos_sims)
# 打印相似度最大的前topk个位置
topk = 5
-print("相似度最大的前{}个索引是{}\n对应的相似度是:{}\n".format(topk, index[-topk:], [cos_sims[k] for k in index[-topk:]]))
+print("相似度最大的前{}个索引是{}\n对应的相似度是:{}\n".format(topk, index[
+ -topk:], [cos_sims[k] for k in index[-topk:]]))
for i in index[-topk:]:
print("对应的电影分别是:movie:{}".format(mov_info[list(mov_feats.keys())[i]]))
-
-
-
-
top_k, pick_num = 10, 6
# 对相似度排序,获得最大相似度在cos_sims中的位置
index = np.argsort(cos_sims)[-top_k:]
print("当前的用户是:")
# usr_id, usr_info 是前面定义、读取的用户ID、用户信息
-print("usr_id:", usr_id, usr_info[str(usr_id)])
+print("usr_id:", usr_id, usr_info[str(usr_id)])
print("推荐可能喜欢的电影是:")
res = []
@@ -100,4 +97,4 @@
res.append(mov_id)
for id in res:
- print("mov_id:", id, mov_info[str(id)])
\ No newline at end of file
+ print("mov_id:", id, mov_info[str(id)])
diff --git a/junior_class/chapter-7-Recommendation_System/code/examples/7-5-3-movie-recommend.py b/junior_class/chapter-7-Recommendation_System/code/examples/7-5-3-movie-recommend.py
old mode 100644
new mode 100755
index ff5007bc2..1423de861
--- a/junior_class/chapter-7-Recommendation_System/code/examples/7-5-3-movie-recommend.py
+++ b/junior_class/chapter-7-Recommendation_System/code/examples/7-5-3-movie-recommend.py
@@ -18,11 +18,13 @@
import paddle.nn.functional as F
# unzip -o data/save_feature_v1.zip -d /home/aistudio/
-import pickle
+import pickle
import numpy as np
+
# 定义根据用户兴趣推荐电影
-def recommend_mov_for_usr(usr_id, top_k, pick_num, usr_feat_dir, mov_feat_dir, mov_info_path):
+def recommend_mov_for_usr(usr_id, top_k, pick_num, usr_feat_dir, mov_feat_dir,
+ mov_info_path):
assert pick_num <= top_k
# 读取电影和用户的特征
usr_feats = pickle.load(open(usr_feat_dir, 'rb'))
@@ -40,7 +42,7 @@ def recommend_mov_for_usr(usr_id, top_k, pick_num, usr_feat_dir, mov_feat_dir, m
mov_feat = paddle.to_tensor(mov_feat)
# 计算余弦相似度
sim = paddle.nn.functional.cosine_similarity(usr_feat, mov_feat)
-
+
cos_sims.append(sim.numpy()[0])
# 对相似度排序
index = np.argsort(cos_sims)[-top_k:]
@@ -52,12 +54,12 @@ def recommend_mov_for_usr(usr_id, top_k, pick_num, usr_feat_dir, mov_feat_dir, m
for item in data:
item = item.strip().split("::")
mov_info[str(item[0])] = item
-
+
print("当前的用户是:")
print("usr_id:", usr_id)
print("推荐可能喜欢的电影是:")
res = []
-
+
# 加入随机选择因素,确保每次推荐的都不一样
while len(res) < pick_num:
val = np.random.choice(len(index), 1)[0]
@@ -69,12 +71,12 @@ def recommend_mov_for_usr(usr_id, top_k, pick_num, usr_feat_dir, mov_feat_dir, m
for id in res:
print("mov_id:", id, mov_info[str(id)])
+
movie_data_path = "./ml-1m/movies.dat"
top_k, pick_num = 10, 6
usr_id = 2
-recommend_mov_for_usr(usr_id, top_k, pick_num, 'usr_feat.pkl', 'mov_feat.pkl', movie_data_path)
-
-
+recommend_mov_for_usr(usr_id, top_k, pick_num, 'usr_feat.pkl', 'mov_feat.pkl',
+ movie_data_path)
# 给定一个用户ID,找到评分最高的topk个电影
@@ -88,12 +90,12 @@ def recommend_mov_for_usr(usr_id, top_k, pick_num, usr_feat_dir, mov_feat_dir, m
# 打开文件,ratings_data
with open(rating_path, 'r') as f:
ratings_data = f.readlines()
-
+
usr_rating_info = {}
for item in ratings_data:
item = item.strip().split("::")
# 处理每行数据,分别得到用户ID,电影ID,和评分
- usr_id,movie_id,score = item[0],item[1],item[2]
+ usr_id, movie_id, score = item[0], item[1], item[2]
if usr_id == str(usr_a):
usr_rating_info[movie_id] = float(score)
@@ -104,13 +106,14 @@ def recommend_mov_for_usr(usr_id, top_k, pick_num, usr_feat_dir, mov_feat_dir, m
#####################################
## 选出ID为usr_a评分最高的前topk个电影 ##
#####################################
-ratings_topk = sorted(usr_rating_info.items(), key=lambda item:item[1])[-topk:]
+ratings_topk = sorted(
+ usr_rating_info.items(), key=lambda item: item[1])[-topk:]
movie_info_path = "./ml-1m/movies.dat"
# 打开文件,编码方式选择ISO-8859-1,读取所有数据到data中
with open(movie_info_path, 'r', encoding="ISO-8859-1") as f:
data = f.readlines()
-
+
movie_info = {}
for item in data:
item = item.strip().split("::")
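
recommend_mov_for_usr adds randomness by repeatedly sampling from the top_k candidates until pick_num distinct movies are collected, so successive runs recommend different subsets. A sketch of that sampling loop (the candidate indices are stand-ins):

import numpy as np

top_k, pick_num = 10, 6
index = np.argsort(np.random.rand(100))[-top_k:]   # stand-in top-k movie indices

res = []
# keep drawing until pick_num distinct candidates are collected
while len(res) < pick_num:
    val = np.random.choice(len(index), 1)[0]
    mov_id = index[val]
    if mov_id not in res:
        res.append(mov_id)
print(res)
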
diff --git a/junior_class/chapter-7-Recommendation_System/code/movielens_dataset.py b/junior_class/chapter-7-Recommendation_System/code/movielens_dataset.py
old mode 100644
new mode 100755
index b1ba1c765..25c733b13
--- a/junior_class/chapter-7-Recommendation_System/code/movielens_dataset.py
+++ b/junior_class/chapter-7-Recommendation_System/code/movielens_dataset.py
@@ -16,24 +16,27 @@
import random
from PIL import Image
+
class MovieLen(object):
- def __init__(self, use_poster,data_path):
+ def __init__(self, use_poster, data_path):
self.use_poster = use_poster
# 声明每个数据文件的路径
- usr_info_path = os.path.join(data_path,"ml-1m/users.dat")
+ usr_info_path = os.path.join(data_path, "ml-1m/users.dat")
if self.use_poster:
- rating_path = os.path.join(data_path,"ml-1m/new_rating.txt")
+ rating_path = os.path.join(data_path, "ml-1m/new_rating.txt")
else:
- rating_path = os.path.join(data_path,"ml-1m/ratings.dat")
+ rating_path = os.path.join(data_path, "ml-1m/ratings.dat")
- movie_info_path = os.path.join(data_path,"ml-1m/movies.dat")
+ movie_info_path = os.path.join(data_path, "ml-1m/movies.dat")
- self.poster_path = os.path.join(data_path,"ml-1m/posters/")
+ self.poster_path = os.path.join(data_path, "ml-1m/posters/")
# 得到电影数据
- self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(movie_info_path)
+ self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(
+ movie_info_path)
# 记录电影的最大ID
self.max_mov_cat = np.max([self.movie_cat[k] for k in self.movie_cat])
- self.max_mov_tit = np.max([self.movie_title[k] for k in self.movie_title])
+ self.max_mov_tit = np.max(
+ [self.movie_title[k] for k in self.movie_title])
self.max_mov_id = np.max(list(map(int, self.movie_info.keys())))
# 记录用户数据的最大ID
self.max_usr_id = 0
@@ -44,15 +47,18 @@ def __init__(self, use_poster,data_path):
# 得到评分数据
self.rating_info = self.get_rating_info(rating_path)
# 构建数据集
- self.dataset = self.get_dataset(usr_info=self.usr_info,
- rating_info=self.rating_info,
- movie_info=self.movie_info)
+ self.dataset = self.get_dataset(
+ usr_info=self.usr_info,
+ rating_info=self.rating_info,
+ movie_info=self.movie_info)
# Split the dataset and create the data loaders
- self.train_dataset = self.dataset[:int(len(self.dataset)*0.9)]
- self.valid_dataset = self.dataset[int(len(self.dataset)*0.9):]
+ self.train_dataset = self.dataset[:int(len(self.dataset) * 0.9)]
+ self.valid_dataset = self.dataset[int(len(self.dataset) * 0.9):]
print("##Total dataset instances: ", len(self.dataset))
print("##MovieLens dataset information: \nusr num: {}\n"
- "movies num: {}".format(len(self.usr_info),len(self.movie_info)))
+ "movies num: {}".format(
+ len(self.usr_info), len(self.movie_info)))
+
# Load the movie data
def get_movie_info(self, path):
# Open the file with ISO-8859-1 encoding and read everything into data
@@ -84,17 +90,19 @@ def get_movie_info(self, path):
c_count += 1
# Zero-pad the movie-title list to a fixed length of 15
v_tit = [movie_titles[k] for k in titles]
- while len(v_tit)<15:
+ while len(v_tit) < 15:
v_tit.append(0)
# Zero-pad the movie-category list to a fixed length of 6
v_cat = [movie_cat[k] for k in cats]
- while len(v_cat)<6:
+ while len(v_cat) < 6:
v_cat.append(0)
# Save the movie record into movie_info
- movie_info[v_id] = {'mov_id': int(v_id),
- 'title': v_tit,
- 'category': v_cat,
- 'years': int(v_year)}
+ movie_info[v_id] = {
+ 'mov_id': int(v_id),
+ 'title': v_tit,
+ 'category': v_cat,
+ 'years': int(v_year)
+ }
return movie_info, movie_cat, movie_titles
def get_usr_info(self, path):
@@ -114,10 +122,12 @@ def gender2num(gender):
item = item.strip().split("::")
usr_id = item[0]
# Convert the string fields to numbers and store them in a dict
- use_info[usr_id] = {'usr_id': int(usr_id),
- 'gender': gender2num(item[1]),
- 'age': int(item[2]),
- 'job': int(item[3])}
+ use_info[usr_id] = {
+ 'usr_id': int(usr_id),
+ 'gender': gender2num(item[1]),
+ 'age': int(item[2]),
+ 'job': int(item[3])
+ }
self.max_usr_id = max(self.max_usr_id, int(usr_id))
self.max_usr_age = max(self.max_usr_age, int(item[2]))
self.max_usr_job = max(self.max_usr_job, int(item[3]))
@@ -132,27 +142,31 @@ def get_rating_info(self, path):
rating_info = {}
for item in data:
item = item.strip().split("::")
- usr_id,movie_id,score = item[0],item[1],item[2]
+ usr_id, movie_id, score = item[0], item[1], item[2]
if usr_id not in rating_info.keys():
- rating_info[usr_id] = {movie_id:float(score)}
+ rating_info[usr_id] = {movie_id: float(score)}
else:
rating_info[usr_id][movie_id] = float(score)
return rating_info
+
# Build the dataset
def get_dataset(self, usr_info, rating_info, movie_info):
trainset = []
for usr_id in rating_info.keys():
usr_ratings = rating_info[usr_id]
for movie_id in usr_ratings:
- trainset.append({'usr_info': usr_info[usr_id],
- 'mov_info': movie_info[movie_id],
- 'scores': usr_ratings[movie_id]})
+ trainset.append({
+ 'usr_info': usr_info[usr_id],
+ 'mov_info': movie_info[movie_id],
+ 'scores': usr_ratings[movie_id]
+ })
return trainset
-
- def load_data(self, dataset=None, mode='train',batch_size=2):
+
+ def load_data(self, dataset=None, mode='train', batch_size=2):
data_length = len(dataset)
index_list = list(range(data_length))
- BATCHSIZE=batch_size
+ BATCHSIZE = batch_size
+
# Define the data iterator
def data_generator():
# In training mode, shuffle the training data
@@ -177,7 +191,8 @@ def data_generator():
if self.use_poster:
# Read the poster image only when poster features are enabled
- poster = Image.open(self.poster_path+'mov_id{}.jpg'.format(str(mov_id[0])))
+ poster = Image.open(self.poster_path +
+ 'mov_id{}.jpg'.format(str(mov_id[0])))
poster = poster.resize([64, 64])
if len(poster.size) <= 2:
poster = poster.convert("RGB")
@@ -186,7 +201,7 @@ def data_generator():
score_list.append(int(dataset[i]['scores']))
# Once a full batch has been collected, yield the current batch
- if len(usr_id_list)==BATCHSIZE:
+ if len(usr_id_list) == BATCHSIZE:
# Convert the lists to arrays and reshape them to fixed shapes
usr_id_arr = np.array(usr_id_list)
usr_gender_arr = np.array(usr_gender_list)
@@ -194,15 +209,22 @@ def data_generator():
usr_job_arr = np.array(usr_job_list)
mov_id_arr = np.array(mov_id_list)
- mov_cat_arr = np.reshape(np.array(mov_cat_list), [BATCHSIZE, 6]).astype(np.int64)
- mov_tit_arr = np.reshape(np.array(mov_tit_list), [BATCHSIZE, 1, 15]).astype(np.int64)
+ mov_cat_arr = np.reshape(
+ np.array(mov_cat_list),
+ [BATCHSIZE, 6]).astype(np.int64)
+ mov_tit_arr = np.reshape(
+ np.array(mov_tit_list),
+ [BATCHSIZE, 1, 15]).astype(np.int64)
if self.use_poster:
- mov_poster_arr = np.reshape(np.array(mov_poster_list)/127.5 - 1, [BATCHSIZE, 3, 64, 64]).astype(np.float32)
+ mov_poster_arr = np.reshape(
+ np.array(mov_poster_list) / 127.5 - 1,
+ [BATCHSIZE, 3, 64, 64]).astype(np.float32)
else:
mov_poster_arr = np.array([0.])
- scores_arr = np.reshape(np.array(score_list), [-1, 1]).astype(np.float32)
+ scores_arr = np.reshape(np.array(score_list),
+ [-1, 1]).astype(np.float32)
# Yield the current batch
yield [usr_id_arr, usr_gender_arr, usr_age_arr, usr_job_arr], \
@@ -212,4 +234,5 @@ def data_generator():
usr_id_list, usr_gender_list, usr_age_list, usr_job_list = [], [], [], []
mov_id_list, mov_tit_list, mov_cat_list, score_list = [], [], [], []
mov_poster_list = []
- return data_generator
\ No newline at end of file
+
+ return data_generator
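load_data above returns a closure that accumulates samples into lists and yields a fixed-size batch whenever BATCHSIZE items have been collected. A stripped-down sketch of that generator-factory pattern on toy data (names are illustrative, not the repo's):

```python
import numpy as np

def make_loader(dataset, batch_size, shuffle=True):
    """Return a generator factory, mirroring load_data's closure pattern."""
    def data_generator():
        index_list = list(range(len(dataset)))
        if shuffle:
            np.random.shuffle(index_list)
        batch = []
        for i in index_list:
            batch.append(dataset[i])
            if len(batch) == batch_size:
                yield np.array(batch)  # emit one fixed-size batch
                batch = []             # reset the accumulator
    return data_generator

loader = make_loader(list(range(10)), batch_size=4)
for b in loader():
    print(b.shape)  # (4,) twice; the trailing partial batch is dropped
```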
diff --git a/junior_class/chapter-7-Recommendation_System/code/nets/DSSM.py b/junior_class/chapter-7-Recommendation_System/code/nets/DSSM.py
old mode 100644
new mode 100755
index 5d49c8eb5..ede684905
--- a/junior_class/chapter-7-Recommendation_System/code/nets/DSSM.py
+++ b/junior_class/chapter-7-Recommendation_System/code/nets/DSSM.py
@@ -22,71 +22,98 @@
from PIL import Image
import math
+
class Model(paddle.nn.Layer):
- def __init__(self, use_poster, use_mov_title, use_mov_cat, use_age_job,fc_sizes,dataset):
+ def __init__(self, use_poster, use_mov_title, use_mov_cat, use_age_job,
+ fc_sizes, dataset):
super(Model, self).__init__()
-
+
# Attach the passed-in names and boolean flags to the model instance
self.use_mov_poster = use_poster
self.use_mov_title = use_mov_title
self.use_usr_age_job = use_age_job
self.use_mov_cat = use_mov_cat
- self.fc_sizes=fc_sizes
- self.Dataset=dataset
+ self.fc_sizes = fc_sizes
+ self.Dataset = dataset
# Get the dataset information, used below to size the embedding tables
-
- usr_embedding_dim=32
- gender_embeding_dim=16
- age_embedding_dim=16
- job_embedding_dim=16
-
- mov_embedding_dim=16
- category_embedding_dim=16
- title_embedding_dim=32
+ usr_embedding_dim = 32
+ gender_embeding_dim = 16
+ age_embedding_dim = 16
+ job_embedding_dim = 16
+ mov_embedding_dim = 16
+ category_embedding_dim = 16
+ title_embedding_dim = 32
""" define network layer for embedding usr info """
USR_ID_NUM = self.Dataset.max_usr_id + 1
-
+
# Embed the user ID, followed by a Linear layer
- self.usr_emb = Embedding(num_embeddings=USR_ID_NUM, embedding_dim=usr_embedding_dim, sparse=False)
+ self.usr_emb = Embedding(
+ num_embeddings=USR_ID_NUM,
+ embedding_dim=usr_embedding_dim,
+ sparse=False)
self.usr_fc = Linear(in_features=usr_embedding_dim, out_features=32)
-
+
# Embed the user gender, followed by a Linear layer
USR_GENDER_DICT_SIZE = 2
- self.usr_gender_emb = Embedding(num_embeddings=USR_GENDER_DICT_SIZE, embedding_dim=gender_embeding_dim)
- self.usr_gender_fc = Linear(in_features=gender_embeding_dim, out_features=16)
-
+ self.usr_gender_emb = Embedding(
+ num_embeddings=USR_GENDER_DICT_SIZE,
+ embedding_dim=gender_embeding_dim)
+ self.usr_gender_fc = Linear(
+ in_features=gender_embeding_dim, out_features=16)
+
# Embed the user age, followed by a Linear layer
USR_AGE_DICT_SIZE = self.Dataset.max_usr_age + 1
- self.usr_age_emb = Embedding(num_embeddings=USR_AGE_DICT_SIZE, embedding_dim=age_embedding_dim)
- self.usr_age_fc = Linear(in_features=age_embedding_dim, out_features=16)
-
+ self.usr_age_emb = Embedding(
+ num_embeddings=USR_AGE_DICT_SIZE, embedding_dim=age_embedding_dim)
+ self.usr_age_fc = Linear(
+ in_features=age_embedding_dim, out_features=16)
+
# Embed the user occupation, followed by a Linear layer
USR_JOB_DICT_SIZE = self.Dataset.max_usr_job + 1
- self.usr_job_emb = Embedding(num_embeddings=USR_JOB_DICT_SIZE, embedding_dim=job_embedding_dim)
- self.usr_job_fc = Linear(in_features=job_embedding_dim, out_features=16)
-
+ self.usr_job_emb = Embedding(
+ num_embeddings=USR_JOB_DICT_SIZE, embedding_dim=job_embedding_dim)
+ self.usr_job_fc = Linear(
+ in_features=job_embedding_dim, out_features=16)
+
# A new Linear layer that fuses all user features
self.usr_combined = Linear(in_features=80, out_features=200)
-
""" define network layer for embedding usr info """
# Embed the movie ID, followed by a Linear layer
MOV_DICT_SIZE = self.Dataset.max_mov_id + 1
- self.mov_emb = Embedding(num_embeddings=MOV_DICT_SIZE, embedding_dim=mov_embedding_dim)
+ self.mov_emb = Embedding(
+ num_embeddings=MOV_DICT_SIZE, embedding_dim=mov_embedding_dim)
self.mov_fc = Linear(in_features=mov_embedding_dim, out_features=32)
-
+
# Embed the movie categories
CATEGORY_DICT_SIZE = len(self.Dataset.movie_cat) + 1
- self.mov_cat_emb = Embedding(num_embeddings=CATEGORY_DICT_SIZE, embedding_dim=category_embedding_dim, sparse=False)
- self.mov_cat_fc = Linear(in_features=category_embedding_dim, out_features=32)
-
+ self.mov_cat_emb = Embedding(
+ num_embeddings=CATEGORY_DICT_SIZE,
+ embedding_dim=category_embedding_dim,
+ sparse=False)
+ self.mov_cat_fc = Linear(
+ in_features=category_embedding_dim, out_features=32)
+
# Embed the movie title
MOV_TITLE_DICT_SIZE = len(self.Dataset.movie_title) + 1
- self.mov_title_emb = Embedding(num_embeddings=MOV_TITLE_DICT_SIZE, embedding_dim=title_embedding_dim, sparse=False)
- self.mov_title_conv = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=(2,1), padding=0)
- self.mov_title_conv2 = Conv2D(in_channels=1, out_channels=1, kernel_size=(3, 1), stride=1, padding=0)
-
+ self.mov_title_emb = Embedding(
+ num_embeddings=MOV_TITLE_DICT_SIZE,
+ embedding_dim=title_embedding_dim,
+ sparse=False)
+ self.mov_title_conv = Conv2D(
+ in_channels=1,
+ out_channels=1,
+ kernel_size=(3, 1),
+ stride=(2, 1),
+ padding=0)
+ self.mov_title_conv2 = Conv2D(
+ in_channels=1,
+ out_channels=1,
+ kernel_size=(3, 1),
+ stride=1,
+ padding=0)
+
# A new Linear layer that fuses all movie features
self.mov_concat_embed = Linear(in_features=96, out_features=200)
@@ -124,7 +151,7 @@ def __init__(self, use_poster, use_mov_title, use_mov_cat, use_age_job,fc_sizes,
act = paddle.nn.ReLU()
self.add_sublayer('movie_act_%d' % i, act)
self._movie_layers.append(act)
-
+
# Forward computation of the user feature
def get_usr_feat(self, usr_var):
""" get usr features"""
@@ -136,7 +163,7 @@ def get_usr_feat(self, usr_var):
usr_id = self.usr_fc(usr_id)
usr_id = F.relu(usr_id)
feats_collect.append(usr_id)
-
+
# Compute the user gender feature and append it to feats_collect
usr_gender = self.usr_gender_emb(usr_gender)
usr_gender = self.usr_gender_fc(usr_gender)
@@ -154,7 +181,7 @@ def get_usr_feat(self, usr_var):
usr_job = self.usr_job_fc(usr_job)
usr_job = F.relu(usr_job)
feats_collect.append(usr_job)
-
+
# Concatenate all user features and produce the final user feature via the Linear layer
usr_feat = paddle.concat(feats_collect, axis=1)
user_features = F.tanh(self.usr_combined(usr_feat))
@@ -178,7 +205,7 @@ def get_mov_feat(self, mov_var):
mov_id = self.mov_fc(mov_id)
mov_id = F.relu(mov_id)
feats_collect.append(mov_id)
-
+
# If movie category data is used, compute the category feature mapping
if self.use_mov_cat:
# Embed each category and sum the embeddings to get the final category feature
@@ -191,31 +218,32 @@ def get_mov_feat(self, mov_var):
if self.use_mov_title:
# Embed the movie title and run convolutions over the embeddings to get the final title feature
mov_title = self.mov_title_emb(mov_title)
- mov_title = F.relu(self.mov_title_conv2(F.relu(self.mov_title_conv(mov_title))))
+ mov_title = F.relu(
+ self.mov_title_conv2(F.relu(self.mov_title_conv(mov_title))))
mov_title = paddle.sum(mov_title, axis=2, keepdim=False)
mov_title = F.relu(mov_title)
mov_title = paddle.reshape(mov_title, [batch_size, -1])
-
+
feats_collect.append(mov_title)
-
+
# Use a fully connected layer to fuse all movie features into a 200-dim vector
mov_feat = paddle.concat(feats_collect, axis=1)
mov_features = F.tanh(self.mov_concat_embed(mov_feat))
for n_layer in self._movie_layers:
mov_features = n_layer(mov_features)
-
+
return mov_features
-
+
# Forward computation of the personalized recommendation model
def forward(self, usr_var, mov_var):
# Compute the user and movie features
user_features = self.get_usr_feat(usr_var)
mov_features = self.get_mov_feat(mov_var)
-
-
+
# Use cosine similarity to measure how well the user matches the movie
- sim = F.cosine_similarity(user_features, mov_features, axis=1).reshape([-1, 1])
+ sim = F.cosine_similarity(
+ user_features, mov_features, axis=1).reshape([-1, 1])
# Scale the similarity up to the same range as the movie ratings
res = paddle.scale(sim, scale=5)
- return user_features, mov_features, res
\ No newline at end of file
+ return user_features, mov_features, res
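The model head reformatted above scores a (user, movie) pair as cosine similarity stretched to the rating scale. A minimal sketch of just that scoring step, with random tensors standing in for the tower outputs (the 32-dim size assumes fc_sizes ends in 32, as in train.py):

```python
import paddle
import paddle.nn.functional as F

# Random stand-ins for the outputs of the user and movie towers
user_features = paddle.rand([4, 32])
mov_features = paddle.rand([4, 32])

# Cosine similarity per row, reshaped into a column vector
sim = F.cosine_similarity(user_features, mov_features, axis=1).reshape([-1, 1])

# Stretch similarity from roughly [-1, 1] toward the rating range
res = paddle.scale(sim, scale=5)
print(res.shape)  # [4, 1]
```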
diff --git a/junior_class/chapter-7-Recommendation_System/code/train.py b/junior_class/chapter-7-Recommendation_System/code/train.py
old mode 100644
new mode 100755
index 9800d385a..256250d1b
--- a/junior_class/chapter-7-Recommendation_System/code/train.py
+++ b/junior_class/chapter-7-Recommendation_System/code/train.py
@@ -21,18 +21,20 @@
from nets.DSSM import Model
from movielens_dataset import MovieLen
-def train(model,train_loader,Epoches):
+
+def train(model, train_loader, Epoches):
# Configure training parameters
lr = 0.001
- paddle.set_device('cpu')
+ paddle.set_device('cpu')
# Switch the model to training mode
model.train()
# Get the data loader
data_loader = train_loader
# Use the Adam optimizer with a learning rate of 0.001
- opt = paddle.optimizer.Adam(learning_rate=lr, parameters=model.parameters())
-
+ opt = paddle.optimizer.Adam(
+ learning_rate=lr, parameters=model.parameters())
+
for epoch in range(0, Epoches):
for idx, data in enumerate(data_loader()):
# Fetch a batch of data and convert it to tensors
@@ -47,18 +49,20 @@ def train(model,train_loader,Epoches):
avg_loss = paddle.mean(loss)
if idx % 500 == 0:
- print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, idx, avg_loss.numpy()))
-
+ print("epoch: {}, batch_id: {}, loss is: {}".format(
+ epoch, idx, avg_loss.numpy()))
+
# Backpropagate the loss, update parameters, and clear the gradients
avg_loss.backward()
opt.step()
opt.clear_grad()
# Save the model once per epoch
- paddle.save(model.state_dict(), './checkpoint/epoch'+str(epoch)+'.pdparams')
+ paddle.save(model.state_dict(),
+ './checkpoint/epoch' + str(epoch) + '.pdparams')
-def evaluation(model, params_file_path,valid_loader):
+def evaluation(model, params_file_path, valid_loader):
print(params_file_path)
# print(model.parameters())
model_state_dict = paddle.load(params_file_path)
@@ -67,7 +71,7 @@ def evaluation(model, params_file_path,valid_loader):
acc_set = []
avg_loss_set = []
- squaredError=[]
+ squaredError = []
for idx, data in enumerate(valid_loader()):
usr, mov, score_label = data
usr_v = [paddle.to_tensor(var) for var in usr]
@@ -82,32 +86,36 @@ def evaluation(model, params_file_path,valid_loader):
squaredError.extend(np.abs(pred_scores - score_label)**2)
diff = np.abs(pred_scores - score_label)
- diff[diff>0.5] = 1
+ diff[diff > 0.5] = 1
acc = 1 - np.mean(diff)
acc_set.append(acc)
break
- RMSE=sqrt(np.sum(squaredError) / len(squaredError))
- return np.mean(acc_set), np.mean(avg_loss_set),RMSE
+ RMSE = sqrt(np.sum(squaredError) / len(squaredError))
+ return np.mean(acc_set), np.mean(avg_loss_set), RMSE
-if __name__=="__main__":
+if __name__ == "__main__":
# Launch training
- fc_sizes=[128, 64, 32]
- Epoches=2
+ fc_sizes = [128, 64, 32]
+ Epoches = 2
# Define the batch size for data iteration
BATCHSIZE = 256
use_poster, use_mov_title, use_mov_cat, use_age_job = False, True, True, True
- data_path='../data/'
- Dataset = MovieLen(use_poster,data_path)
+ data_path = '../data/'
+ Dataset = MovieLen(use_poster, data_path)
trainset = Dataset.train_dataset
valset = Dataset.valid_dataset
- train_loader = Dataset.load_data(dataset=trainset, mode='train',batch_size=BATCHSIZE)
- valid_loader =Dataset.load_data(dataset=valset, mode='valid',batch_size=BATCHSIZE)
- model = Model(use_poster, use_mov_title, use_mov_cat, use_age_job,fc_sizes,Dataset)
+ train_loader = Dataset.load_data(
+ dataset=trainset, mode='train', batch_size=BATCHSIZE)
+ valid_loader = Dataset.load_data(
+ dataset=valset, mode='valid', batch_size=BATCHSIZE)
+ model = Model(use_poster, use_mov_title, use_mov_cat, use_age_job,
+ fc_sizes, Dataset)
# train(model,train_loader,Epoches)
param_path = "./checkpoint/epoch"
for i in range(Epoches):
- acc, mae,rmse = evaluation(model, param_path+str(i)+'.pdparams',valid_loader)
- print("ACC:", acc, "MAE:", mae,'RMSE:',rmse)
+ acc, mae, rmse = evaluation(model, param_path + str(i) + '.pdparams',
+ valid_loader)
+ print("ACC:", acc, "MAE:", mae, 'RMSE:', rmse)
diff --git a/junior_class/chapter-7-Recommendation_System/notebook/7-1-Introduction_to_Recommended_System.ipynb b/junior_class/chapter-7-Recommendation_System/notebook/7-1-Introduction_to_Recommended_System.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-7-Recommendation_System/notebook/7-2-Recommended System-Data_Processing.ipynb b/junior_class/chapter-7-Recommendation_System/notebook/7-2-Recommended System-Data_Processing.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-7-Recommendation_System/notebook/7-3-Design_Movie_Recommended_System.ipynb b/junior_class/chapter-7-Recommendation_System/notebook/7-3-Design_Movie_Recommended_System.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-7-Recommendation_System/notebook/7-4-Movie_Recommended_System-Training_and_Extract_Feature.ipynb b/junior_class/chapter-7-Recommendation_System/notebook/7-4-Movie_Recommended_System-Training_and_Extract_Feature.ipynb
old mode 100644
new mode 100755
diff --git a/junior_class/chapter-7-Recommendation_System/notebook/7-5-Movie_Recommended_System.ipynb b/junior_class/chapter-7-Recommendation_System/notebook/7-5-Movie_Recommended_System.ipynb
old mode 100644
new mode 100755
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/.idea/.gitignore b/transformer_courses/Application_of_transformer_in_image_classification/.idea/.gitignore
old mode 100644
new mode 100755
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/.idea/inspectionProfiles/profiles_settings.xml b/transformer_courses/Application_of_transformer_in_image_classification/.idea/inspectionProfiles/profiles_settings.xml
old mode 100644
new mode 100755
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/.idea/misc.xml b/transformer_courses/Application_of_transformer_in_image_classification/.idea/misc.xml
old mode 100644
new mode 100755
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/.idea/modules.xml b/transformer_courses/Application_of_transformer_in_image_classification/.idea/modules.xml
old mode 100644
new mode 100755
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/.idea/transformer.iml b/transformer_courses/Application_of_transformer_in_image_classification/.idea/transformer.iml
old mode 100644
new mode 100755
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/.idea/vcs.xml b/transformer_courses/Application_of_transformer_in_image_classification/.idea/vcs.xml
old mode 100644
new mode 100755
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/README.md b/transformer_courses/Application_of_transformer_in_image_classification/README.md
old mode 100644
new mode 100755
index 836b30227..1b101f840
--- a/transformer_courses/Application_of_transformer_in_image_classification/README.md
+++ b/transformer_courses/Application_of_transformer_in_image_classification/README.md
@@ -64,7 +64,7 @@ DeiT(Data-efficient Image Transformers)系列模型是由FaceBook在2020年
The model evaluation process can be started as follows
```bash
-python3 eval.py
+python3 eval.py
--model ViT \
--data data/ILSVRC2012_val
```
@@ -72,4 +72,4 @@ python3 eval.py
The command above takes the following arguments:
+ `model`: model name; the default is `ViT`, which can be changed to `DeiT`;
-+ `data`: the directory holding the ImageNet validation set; the default is `data/ILSVRC2012_val`.
\ No newline at end of file
++ `data`: the directory holding the ImageNet validation set; the default is `data/ILSVRC2012_val`.
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/README_en.md b/transformer_courses/Application_of_transformer_in_image_classification/README_en.md
old mode 100644
new mode 100755
index 218efd40e..1fbab3cfc
--- a/transformer_courses/Application_of_transformer_in_image_classification/README_en.md
+++ b/transformer_courses/Application_of_transformer_in_image_classification/README_en.md
@@ -37,7 +37,7 @@ DeiT(Data-efficient Image Transformers) series models were proposed by Facebook
tar -xvf ILSVRC2012_val.tar
cd ../
```
-
+
- Please organize data dir as below
```
@@ -62,7 +62,7 @@ DeiT(Data-efficient Image Transformers) series models were proposed by Facebook
The model evaluation process can be started as follows
```bash
-python3 eval.py
+python3 eval.py
--model ViT \
--data data/ILSVRC2012_val
```
@@ -71,4 +71,3 @@ Among them:
+ `model`: Model name, The default value is `ViT`, which can be changed to `DeiT`;
+ `data`: The directory to save the ImageNet verification set, the default value is `data/ILSVRC2012_val`.
-
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/dataset.py b/transformer_courses/Application_of_transformer_in_image_classification/dataset.py
old mode 100644
new mode 100755
index d84259572..a1bed4633
--- a/transformer_courses/Application_of_transformer_in_image_classification/dataset.py
+++ b/transformer_courses/Application_of_transformer_in_image_classification/dataset.py
@@ -14,7 +14,8 @@
import os
from paddle.io import Dataset
-from transform import transform
+from transform import transform
+
# Read the data; if it is training data, shuffle it randomly
def get_file_list(file_list):
@@ -44,4 +45,4 @@ def __getitem__(self, idx):
return (transformed_img, int(label))
def __len__(self):
- return self.num_samples
\ No newline at end of file
+ return self.num_samples
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/eval.py b/transformer_courses/Application_of_transformer_in_image_classification/eval.py
old mode 100644
new mode 100755
index 9405eb74f..c470694d0
--- a/transformer_courses/Application_of_transformer_in_image_classification/eval.py
+++ b/transformer_courses/Application_of_transformer_in_image_classification/eval.py
@@ -30,25 +30,25 @@ def eval(args):
# Instantiate the model
if args.model == 'ViT':
model = VisionTransformer(
- patch_size=16,
- class_dim=1000,
- embed_dim=768,
- depth=12,
- num_heads=12,
- mlp_ratio=4,
- qkv_bias=True,
- epsilon=1e-6)
+ patch_size=16,
+ class_dim=1000,
+ embed_dim=768,
+ depth=12,
+ num_heads=12,
+ mlp_ratio=4,
+ qkv_bias=True,
+ epsilon=1e-6)
params_file_path = "model_file/ViT_base_patch16_384_pretrained.pdparams"
else:
model = DistilledVisionTransformer(
- patch_size=16,
- embed_dim=768,
- depth=12,
- num_heads=12,
- mlp_ratio=4,
- qkv_bias=True,
- epsilon=1e-6)
- params_file_path="model_file/DeiT_base_distilled_patch16_384_pretrained.pdparams"
+ patch_size=16,
+ embed_dim=768,
+ depth=12,
+ num_heads=12,
+ mlp_ratio=4,
+ qkv_bias=True,
+ epsilon=1e-6)
+ params_file_path = "model_file/DeiT_base_distilled_patch16_384_pretrained.pdparams"
# Load the model parameters
model_state_dict = paddle.load(params_file_path)
@@ -62,7 +62,8 @@ def eval(args):
val_dataset = ImageNetDataset(args.data, VAL_FILE_LIST)
# Create the data reader with paddle.io.DataLoader, setting batch_size, num_workers and other options
- val_loader = paddle.io.DataLoader(val_dataset, batch_size=2, num_workers=1, drop_last=True)
+ val_loader = paddle.io.DataLoader(
+ val_dataset, batch_size=2, num_workers=1, drop_last=True)
acc_set = []
avg_loss_set = []
@@ -83,11 +84,16 @@ def eval(args):
acc_set.append(acc.numpy())
avg_loss_set.append(loss.numpy())
- print("[validation] accuracy/loss: {}/{}".format(np.mean(acc_set), np.mean(avg_loss_set)))
+ print("[validation] accuracy/loss: {}/{}".format(
+ np.mean(acc_set), np.mean(avg_loss_set)))
+
if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='Evaluation of Transformer based on ImageNet')
- parser.add_argument('--model', type=str, default='ViT', help='Transformer model')
- parser.add_argument('--data', type=str, default='data/ILSVRC2012_val', help='Data dir')
+ parser = argparse.ArgumentParser(
+ description='Evaluation of Transformer based on ImageNet')
+ parser.add_argument(
+ '--model', type=str, default='ViT', help='Transformer model')
+ parser.add_argument(
+ '--data', type=str, default='data/ILSVRC2012_val', help='Data dir')
args = parser.parse_args()
eval(args)
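eval.py wraps the validation set in paddle.io.DataLoader with a small batch size and drop_last=True. A self-contained toy version of that setup; the random dataset is a stand-in for ImageNetDataset, and num_workers is set to 0 here to keep the sketch single-process:

```python
import numpy as np
from paddle.io import Dataset, DataLoader

class ToyDataset(Dataset):
    """Stand-in for ImageNetDataset: random images with integer labels."""
    def __init__(self, num_samples=5):
        self.num_samples = num_samples

    def __getitem__(self, idx):
        img = np.random.rand(3, 384, 384).astype('float32')
        label = np.random.randint(0, 1000)
        return img, label

    def __len__(self):
        return self.num_samples

val_loader = DataLoader(
    ToyDataset(), batch_size=2, num_workers=0,
    drop_last=True)  # the odd leftover sample is dropped
for imgs, labels in val_loader():
    print(imgs.shape, labels.shape)  # [2, 3, 384, 384] [2]
```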
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/model.py b/transformer_courses/Application_of_transformer_in_image_classification/model.py
old mode 100644
new mode 100755
index ef87a4872..576e06974
--- a/transformer_courses/Application_of_transformer_in_image_classification/model.py
+++ b/transformer_courses/Application_of_transformer_in_image_classification/model.py
@@ -21,10 +21,12 @@
zeros_ = nn.initializer.Constant(value=0.)
ones_ = nn.initializer.Constant(value=1.)
+
# Convert an int input x to a 2-tuple
def to_2tuple(x):
return tuple([x] * 2)
+
# Define a layer that performs no operation
class Identity(nn.Layer):
def __init__(self):
@@ -144,6 +146,7 @@ def drop_path(x, drop_prob=0., training=False):
output = x.divide(keep_prob) * random_tensor
return output
+
class DropPath(nn.Layer):
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
@@ -154,7 +157,8 @@ def forward(self, x):
class Block(nn.Layer):
- def __init__(self, dim,
+ def __init__(self,
+ dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
@@ -257,6 +261,7 @@ def __init__(self,
trunc_normal_(self.pos_embed)
trunc_normal_(self.cls_token)
self.apply(self._init_weights)
+
# Parameter initialization
def _init_weights(self, m):
if isinstance(m, nn.Linear):
@@ -266,6 +271,7 @@ def _init_weights(self, m):
elif isinstance(m, nn.LayerNorm):
zeros_(m.bias)
ones_(m.weight)
+
# Extract image features
def forward_features(self, x):
B = paddle.shape(x)[0]
@@ -337,6 +343,7 @@ def __init__(self,
trunc_normal_(self.dist_token)
trunc_normal_(self.pos_embed)
self.head_dist.apply(self._init_weights)
+
# Extract image features
def forward_features(self, x):
B = paddle.shape(x)[0]
@@ -364,4 +371,4 @@ def forward(self, x):
x = self.head(x)
x_dist = self.head_dist(x_dist)
# Average the class token and the distillation token as the final result
- return (x + x_dist) / 2
\ No newline at end of file
+ return (x + x_dist) / 2
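model.py's drop_path (stochastic depth) zeroes a residual branch per sample with probability drop_prob during training and rescales the survivors by 1/keep_prob so expectations match inference. A numpy sketch of the trick; shapes and the seed argument are illustrative:

```python
import numpy as np

def drop_path(x, drop_prob=0.25, training=True, seed=0):
    """Stochastic depth: zero whole samples, rescale the survivors."""
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    rng = np.random.default_rng(seed)
    # One Bernoulli draw per sample, broadcast across feature dims
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = np.floor(keep_prob + rng.random(shape))  # 0.0 or 1.0
    return x / keep_prob * random_tensor

x = np.ones((4, 3))
print(drop_path(x))  # dropped rows are zeros; kept rows equal 1/keep_prob
```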
diff --git a/transformer_courses/Application_of_transformer_in_image_classification/transform.py b/transformer_courses/Application_of_transformer_in_image_classification/transform.py
old mode 100644
new mode 100755
index 9c06eb8b7..e19befaec
--- a/transformer_courses/Application_of_transformer_in_image_classification/transform.py
+++ b/transformer_courses/Application_of_transformer_in_image_classification/transform.py
@@ -22,8 +22,7 @@ def decode_image(img, to_rgb=True):
data = np.frombuffer(img, dtype='uint8')
img = cv2.imdecode(data, 1)
if to_rgb:
- assert img.shape[2] == 3, 'invalid shape of image[%s]' % (
- img.shape)
+ assert img.shape[2] == 3, 'invalid shape of image[%s]' % (img.shape)
img = img[:, :, ::-1]
return img
@@ -76,7 +75,7 @@ def crop_image(img, size):
# Define normalize_image, which normalizes the image
-def normalize_image(img, scale=None, mean=None, std=None, order= ''):
+def normalize_image(img, scale=None, mean=None, std=None, order=''):
if isinstance(scale, str):
scale = eval(scale)
scale = np.float32(scale if scale is not None else 1.0 / 255.0)
@@ -112,8 +111,8 @@ def transform(data, mode='train'):
# Crop the image
data = crop_image(data, size=384)
# Normalize
- data = normalize_image(data, scale=1./255., mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+ data = normalize_image(
+ data, scale=1. / 255., mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
# Transpose channels (HWC to CHW)
data = to_CHW_image(data)
return data
-
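The normalize step being rewrapped above maps uint8 pixels into [-1, 1]: scale by 1/255, subtract mean 0.5, divide by std 0.5. A tiny numpy check of that arithmetic on toy pixel values:

```python
import numpy as np

img = np.array([0, 128, 255], dtype='uint8')  # toy pixel values
scale, mean, std = 1. / 255., 0.5, 0.5

normalized = (img.astype('float32') * scale - mean) / std
print(normalized)  # approximately [-1.  0.004  1.]
```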
diff --git a/transformer_courses/Application_of_transformer_in_image_classification_Swin/README.md b/transformer_courses/Application_of_transformer_in_image_classification_Swin/README.md
old mode 100644
new mode 100755
index 16bf17b9d..b2bbc68db
--- a/transformer_courses/Application_of_transformer_in_image_classification_Swin/README.md
+++ b/transformer_courses/Application_of_transformer_in_image_classification_Swin/README.md
@@ -59,7 +59,7 @@ Swin Transformer是一种新的视觉领域的Transformer模型,来自论文
The model evaluation process can be started as follows
```bash
-python3 eval.py
+python3 eval.py
--model SwinTransformer \
--data data/ILSVRC2012_val
```
@@ -67,4 +67,4 @@ python3 eval.py
The command above takes the following arguments:
+ `model`: model name;
-+ `data`: the directory holding the ImageNet validation set; the default is `data/ILSVRC2012_val`.
\ No newline at end of file
++ `data`: the directory holding the ImageNet validation set; the default is `data/ILSVRC2012_val`.
diff --git a/transformer_courses/Application_of_transformer_in_image_classification_Swin/README_en.md b/transformer_courses/Application_of_transformer_in_image_classification_Swin/README_en.md
old mode 100644
new mode 100755
index 52c288f9f..8d150d892
--- a/transformer_courses/Application_of_transformer_in_image_classification_Swin/README_en.md
+++ b/transformer_courses/Application_of_transformer_in_image_classification_Swin/README_en.md
@@ -36,7 +36,7 @@ Swin Transformer is a new Transformer model of computer vision, from the paper "
tar -xvf ILSVRC2012_val.tar
cd ../
```
-
+
- Please organize data dir as below
```
@@ -60,7 +60,7 @@ Swin Transformer is a new Transformer model of computer vision, from the paper "
The model evaluation process can be started as follows
```bash
-python3 eval.py
+python3 eval.py
--model SwinTransformer \
--data data/ILSVRC2012_val
```
diff --git a/transformer_courses/Application_of_transformer_in_image_classification_Swin/dataset.py b/transformer_courses/Application_of_transformer_in_image_classification_Swin/dataset.py
old mode 100644
new mode 100755
index 740a6b89e..a1bed4633
--- a/transformer_courses/Application_of_transformer_in_image_classification_Swin/dataset.py
+++ b/transformer_courses/Application_of_transformer_in_image_classification_Swin/dataset.py
@@ -14,7 +14,8 @@
import os
from paddle.io import Dataset
-from transform import transform
+from transform import transform
+
# Read the data; if it is training data, shuffle it randomly
def get_file_list(file_list):
diff --git a/transformer_courses/Application_of_transformer_in_image_classification_Swin/eval.py b/transformer_courses/Application_of_transformer_in_image_classification_Swin/eval.py
old mode 100644
new mode 100755
index 9593c9217..c8e37a638
--- a/transformer_courses/Application_of_transformer_in_image_classification_Swin/eval.py
+++ b/transformer_courses/Application_of_transformer_in_image_classification_Swin/eval.py
@@ -20,6 +20,7 @@
from swin_transformer import *
from dataset import ImageNetDataset
+
def eval(args):
# Use GPU 0
use_gpu = True
@@ -28,12 +29,12 @@ def eval(args):
print('start evaluation .......')
# Instantiate the model
if args.model == 'SwinTransformer':
- model = SwinTransformer_tiny_patch4_window7_224(pretrained=False,
- use_ssld=False)
+ model = SwinTransformer_tiny_patch4_window7_224(
+ pretrained=False, use_ssld=False)
params_file_path = "./model_file/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams"
else:
- raise ValueError("No model named:"+args.model)
+ raise ValueError("No model named:" + args.model)
# Load the model parameters
model_state_dict = paddle.load(params_file_path)
@@ -47,7 +48,8 @@ def eval(args):
val_dataset = ImageNetDataset(args.data, VAL_FILE_LIST)
# Create the data reader with paddle.io.DataLoader, setting batch_size, num_workers and other options
- val_loader = paddle.io.DataLoader(val_dataset, batch_size=64, num_workers=4, drop_last=False)
+ val_loader = paddle.io.DataLoader(
+ val_dataset, batch_size=64, num_workers=4, drop_last=False)
acc_set = []
avg_loss_set = []
@@ -69,16 +71,22 @@ def eval(args):
acc_set.append(acc.numpy())
avg_loss_set.append(loss.numpy())
- print("batch:",batch_id,",acc:",acc.numpy())#,"loss:",loss.numpy())
-
- print("acc_set shape:",len(acc_set))
+ print("batch:", batch_id, ",acc:",
+ acc.numpy()) #,"loss:",loss.numpy())
+
+ print("acc_set shape:", len(acc_set))
print("[validation] accuracy: {}".format(np.mean(acc_set)))
+
if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='Evaluation of SwinTransformer based on ImageNet')
- parser.add_argument('--model', type=str, default='SwinTransformer', help='Transformer model')
- parser.add_argument('--data', type=str, default='data/ILSVRC2012_val', help='Data dir')
+ parser = argparse.ArgumentParser(
+ description='Evaluation of SwinTransformer based on ImageNet')
+ parser.add_argument(
+ '--model',
+ type=str,
+ default='SwinTransformer',
+ help='Transformer model')
+ parser.add_argument(
+ '--data', type=str, default='data/ILSVRC2012_val', help='Data dir')
args = parser.parse_args()
eval(args)
-
-
\ No newline at end of file
diff --git a/transformer_courses/Application_of_transformer_in_image_classification_Swin/swin_transformer.py b/transformer_courses/Application_of_transformer_in_image_classification_Swin/swin_transformer.py
old mode 100644
new mode 100755
index eb37e6756..bf265b83d
--- a/transformer_courses/Application_of_transformer_in_image_classification_Swin/swin_transformer.py
+++ b/transformer_courses/Application_of_transformer_in_image_classification_Swin/swin_transformer.py
@@ -13,9 +13,11 @@
WEIGHTS_HOME = './weights'
+
def to_2tuple(x):
return tuple([x] * 2)
+
class Identity(nn.Layer):
def __init__(self):
super(Identity, self).__init__()
@@ -23,6 +25,7 @@ def __init__(self):
def forward(self, input):
return input
+
def drop_path(x, drop_prob=0., training=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
@@ -37,6 +40,7 @@ def drop_path(x, drop_prob=0., training=False):
output = x.divide(keep_prob) * random_tensor
return output
+
class DropPath(nn.Layer):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
@@ -48,6 +52,7 @@ def __init__(self, drop_prob=None):
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
+
# Multi-layer perceptron
class Mlp(nn.Layer):
def __init__(self,
@@ -72,6 +77,7 @@ def forward(self, x):
x = self.drop(x)
return x
+
# Window partition
def window_partition(x, window_size):
"""
@@ -81,13 +87,14 @@ def window_partition(x, window_size):
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
- B, H, W, C = x.shape # batch,Height,Width,Channel
+ B, H, W, C = x.shape # batch,Height,Width,Channel
x = x.reshape(
[B, H // window_size, window_size, W // window_size, window_size, C])
windows = x.transpose([0, 1, 3, 2, 4, 5]).reshape(
[-1, window_size, window_size, C])
return windows
+
# Reverse window partition
def window_reverse(windows, window_size, H, W, C):
"""
@@ -104,6 +111,7 @@ def window_reverse(windows, window_size, H, W, C):
x = x.transpose([0, 1, 3, 2, 4, 5]).reshape([-1, H, W, C])
return x
+
class WindowAttention(nn.Layer):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
@@ -232,6 +240,7 @@ def flops(self, N):
flops += N * self.dim * self.dim
return flops
+
class SwinTransformerBlock(nn.Layer):
r""" Swin Transformer Block.
Args:
@@ -392,6 +401,7 @@ def flops(self):
flops += self.dim * H * W
return flops
+
class PatchMerging(nn.Layer):
r""" Patch Merging Layer.
Args:
@@ -406,7 +416,7 @@ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias_attr=False)
self.norm = norm_layer(4 * dim)
-
+
def forward(self, x):
"""
x: B, H*W, C
@@ -430,7 +440,7 @@ def forward(self, x):
x = self.reduction(x)
return x
-
+
def extra_repr(self):
return "input_resolution={}, dim={}".format(self.input_resolution,
self.dim)
@@ -441,6 +451,7 @@ def flops(self):
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
+
class BasicLayer(nn.Layer):
""" A basic Swin Transformer layer for one stage.
Args:
@@ -517,7 +528,7 @@ def forward(self, x):
def extra_repr(self):
return "dim={}, input_resolution={}, depth={}".format(
self.dim, self.input_resolution, self.depth)
-
+
def flops(self):
flops = 0
for blk in self.blocks:
@@ -526,6 +537,7 @@ def flops(self):
flops += self.downsample.flops()
return flops
+
class PatchEmbed(nn.Layer):
""" Image to Patch Embedding
Args:
@@ -582,6 +594,7 @@ def flops(self):
flops += Ho * Wo * self.embed_dim
return flops
+
class SwinTransformer(nn.Layer):
""" Swin Transformer
A PaddlePaddle impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
@@ -699,7 +712,6 @@ def _init_weights(self, m):
elif isinstance(m, nn.LayerNorm):
zeros_(m.bias)
ones_(m.weight)
-
def forward_features(self, x):
x = self.patch_embed(x)
@@ -730,6 +742,7 @@ def flops(self):
flops += self.num_features * self.num_classes
return flops
+
def is_url(path):
"""
Whether path is URL.
@@ -738,6 +751,7 @@ def is_url(path):
"""
return str(path).startswith('http://') or str(path).startswith('https://')
+
def get_path_from_url(url,
root_dir,
md5sum=None,
@@ -783,6 +797,7 @@ def get_path_from_url(url,
return fullpath
+
def get_weights_path_from_url(url, md5sum=None):
"""Get weights path from WEIGHT_HOME, if not exists,
download it from url.
@@ -801,6 +816,7 @@ def get_weights_path_from_url(url, md5sum=None):
path = get_path_from_url(url, WEIGHTS_HOME, md5sum)
return path
+
def load_dygraph_pretrain_from_url(model, pretrained_url, use_ssld):
if use_ssld:
pretrained_url = pretrained_url.replace("_pretrained",
@@ -810,6 +826,7 @@ def load_dygraph_pretrain_from_url(model, pretrained_url, use_ssld):
load_dygraph_pretrain(model, path=local_weight_path)
return
+
def _load_pretrained(pretrained, model, model_url, use_ssld=False):
if pretrained is False:
pass
@@ -822,6 +839,7 @@ def _load_pretrained(pretrained, model, model_url, use_ssld=False):
"pretrained type is not available. Please use `string` or `boolean` type."
)
+
def SwinTransformer_tiny_patch4_window7_224(pretrained=False,
use_ssld=False,
**kwargs):
@@ -832,13 +850,10 @@ def SwinTransformer_tiny_patch4_window7_224(pretrained=False,
window_size=7,
drop_path_rate=0.2,
**kwargs)
- _load_pretrained(
- pretrained,
- model,
- pretrained,
- use_ssld=use_ssld)
+ _load_pretrained(pretrained, model, pretrained, use_ssld=use_ssld)
return model
+
def SwinTransformer_small_patch4_window7_224(pretrained=False,
use_ssld=False,
**kwargs):
@@ -848,13 +863,10 @@ def SwinTransformer_small_patch4_window7_224(pretrained=False,
num_heads=[3, 6, 12, 24],
window_size=7,
**kwargs)
- _load_pretrained(
- pretrained,
- model,
- pretrained,
- use_ssld=use_ssld)
+ _load_pretrained(pretrained, model, pretrained, use_ssld=use_ssld)
return model
+
def SwinTransformer_base_patch4_window7_224(pretrained=False,
use_ssld=False,
**kwargs):
@@ -865,13 +877,10 @@ def SwinTransformer_base_patch4_window7_224(pretrained=False,
window_size=7,
drop_path_rate=0.5,
**kwargs)
- _load_pretrained(
- pretrained,
- model,
- pretrained,
- use_ssld=use_ssld)
+ _load_pretrained(pretrained, model, pretrained, use_ssld=use_ssld)
return model
+
def SwinTransformer_base_patch4_window12_384(pretrained=False,
use_ssld=False,
**kwargs):
@@ -883,13 +892,10 @@ def SwinTransformer_base_patch4_window12_384(pretrained=False,
window_size=12,
drop_path_rate=0.5, # NOTE: do not appear in offical code
**kwargs)
- _load_pretrained(
- pretrained,
- model,
- pretrained,
- use_ssld=use_ssld)
+ _load_pretrained(pretrained, model, pretrained, use_ssld=use_ssld)
return model
+
def SwinTransformer_large_patch4_window7_224(pretrained=False,
use_ssld=False,
**kwargs):
@@ -899,13 +905,10 @@ def SwinTransformer_large_patch4_window7_224(pretrained=False,
num_heads=[6, 12, 24, 48],
window_size=7,
**kwargs)
- _load_pretrained(
- pretrained,
- model,
- pretrained,
- use_ssld=use_ssld)
+ _load_pretrained(pretrained, model, pretrained, use_ssld=use_ssld)
return model
+
def SwinTransformer_large_patch4_window12_384(pretrained=False,
use_ssld=False,
**kwargs):
@@ -916,13 +919,8 @@ def SwinTransformer_large_patch4_window12_384(pretrained=False,
num_heads=[6, 12, 24, 48],
window_size=12,
**kwargs)
- _load_pretrained(
- pretrained,
- model,
- pretrained,
- use_ssld=use_ssld)
+ _load_pretrained(pretrained, model, pretrained, use_ssld=use_ssld)
return model
- # global configs
-
+# global configs
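window_partition in swin_transformer.py is pure reshape/transpose bookkeeping, so the same logic runs unchanged on numpy arrays, which makes its shape contract easy to verify. A sketch under that assumption (the repo's version operates on paddle tensors):

```python
import numpy as np

def window_partition(x, window_size):
    """(B, H, W, C) -> (num_windows * B, window_size, window_size, C)."""
    B, H, W, C = x.shape
    x = x.reshape(
        [B, H // window_size, window_size, W // window_size, window_size, C])
    return x.transpose([0, 1, 3, 2, 4, 5]).reshape(
        [-1, window_size, window_size, C])

x = np.zeros((2, 8, 8, 3), dtype='float32')
print(window_partition(x, 4).shape)  # (8, 4, 4, 3): 2x2 windows per image
```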
diff --git a/transformer_courses/Application_of_transformer_in_image_classification_Swin/transform.py b/transformer_courses/Application_of_transformer_in_image_classification_Swin/transform.py
old mode 100644
new mode 100755
index 268775ce3..7226b8805
--- a/transformer_courses/Application_of_transformer_in_image_classification_Swin/transform.py
+++ b/transformer_courses/Application_of_transformer_in_image_classification_Swin/transform.py
@@ -17,6 +17,7 @@
from PIL import Image
import six
+
class DecodeImage(object):
""" decode image """
@@ -44,6 +45,7 @@ def __call__(self, img):
return img
+
class ResizeImage(object):
""" resize image """
@@ -75,6 +77,7 @@ def __call__(self, img):
else:
return cv2.resize(img, (w, h), interpolation=self.interpolation)
+
class CropImage(object):
""" crop image """
@@ -94,6 +97,7 @@ def __call__(self, img):
h_end = h_start + h
return img[h_start:h_end, w_start:w_end, :]
+
class NormalizeImage(object):
""" normalize image such as substract mean, divide std
"""
@@ -118,6 +122,7 @@ def __call__(self, img):
np.ndarray), "invalid input 'img' in NormalizeImage"
return (img.astype('float32') * self.scale - self.mean) / self.std
+
class ToCHWImage(object):
""" convert hwc image to chw image
"""
@@ -132,6 +137,7 @@ def __call__(self, img):
return img.transpose((2, 0, 1))
+
# The full image preprocessing pipeline
def transform(data, mode='eval'):
@@ -139,15 +145,19 @@ def transform(data, mode='eval'):
decode_image = DecodeImage()
data = decode_image(data)
# Resize the image
- resize_image = ResizeImage( resize_short=256)
+ resize_image = ResizeImage(resize_short=256)
data = resize_image(data)
# Crop the image
crop_image = CropImage(size=224)
data = crop_image(data)
# Normalize
- normalize_image = NormalizeImage(scale=1.0/255.0, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],order='')
+ normalize_image = NormalizeImage(
+ scale=1.0 / 255.0,
+ mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225],
+ order='')
data = normalize_image(data)
# Transpose channels (HWC to CHW)
to_CHW_image = ToCHWImage()
data = to_CHW_image(data)
- return data
\ No newline at end of file
+ return data
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/.gitignore b/transformer_courses/BERT_distillation/PaddleSlim-develop/.gitignore
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/.hooks/copyright.hook b/transformer_courses/BERT_distillation/PaddleSlim-develop/.hooks/copyright.hook
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/.hooks/pylint_pre_commit.hook b/transformer_courses/BERT_distillation/PaddleSlim-develop/.hooks/pylint_pre_commit.hook
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/.pre-commit-config.yaml b/transformer_courses/BERT_distillation/PaddleSlim-develop/.pre-commit-config.yaml
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/.style.yapf b/transformer_courses/BERT_distillation/PaddleSlim-develop/.style.yapf
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/LICENSE b/transformer_courses/BERT_distillation/PaddleSlim-develop/LICENSE
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/demo/ofa/bert/export_model.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/demo/ofa/bert/export_model.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/demo/ofa/bert/run_glue_ofa.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/demo/ofa/bert/run_glue_ofa.py
old mode 100644
new mode 100755
index 5c01924e1..79aea8f2d
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/demo/ofa/bert/run_glue_ofa.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/demo/ofa/bert/run_glue_ofa.py
@@ -165,7 +165,12 @@ def set_seed(args):
paddle.seed(args.seed + paddle.distributed.get_rank())
-def evaluate(model, criterion, metric, data_loader, epoch, step,
+def evaluate(model,
+ criterion,
+ metric,
+ data_loader,
+ epoch,
+ step,
width_mult=1.0):
with paddle.no_grad():
model.eval()
@@ -180,8 +185,8 @@ def evaluate(model, criterion, metric, data_loader, epoch, step,
metric.update(correct)
results = metric.accumulate()
print("epoch: %d, batch: %d, width_mult: %s, eval loss: %f, %s: %s\n" %
- (epoch, step, 'teacher' if width_mult == 100 else str(width_mult),
- loss.numpy(), metric.name(), results))
+ (epoch, step, 'teacher' if width_mult == 100 else
+ str(width_mult), loss.numpy(), metric.name(), results))
model.train()
@@ -195,7 +200,8 @@ def bert_forward(self,
self.pooler.dense, 'fn') else self.pooler.dense.weight.dtype
if attention_mask[0] is None:
attention_mask[0] = paddle.unsqueeze(
- (input_ids == self.pad_token_id).astype(wtype) * -1e9, axis=[1, 2])
+ (input_ids == self.pad_token_id).astype(wtype) * -1e9,
+ axis=[1, 2])
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
@@ -264,7 +270,8 @@ def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):
if isinstance(seq_mask, int):
seq_mask = [[seq_mask] * len(seq) for seq in seqs]
if isinstance(separator_mask, int):
- separator_mask = [[separator_mask] * len(sep) for sep in separators]
+ separator_mask = [[separator_mask] * len(sep)
+ for sep in separators]
p_mask = sum((s_mask + mask
for sep, seq, s_mask, mask in zip(
separators, seqs, seq_mask, separator_mask)), [])
@@ -290,8 +297,8 @@ def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):
tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)
# concate the sequences with special tokens
tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]
- tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *
- len(tokens_trun))
+ tokens, segment_ids, _ = _concat_seqs(
+ tokens_trun, [[tokenizer.sep_token]] * len(tokens_trun))
# convert the token to ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
valid_length = len(input_ids)
@@ -534,7 +541,8 @@ def do_train(args):
(time.time() - tic_eval))
else:
acc = evaluate(ofa_model, criterion, metric,
- dev_data_loader, epoch, step, width_mult)
+ dev_data_loader, epoch, step,
+ width_mult)
print("eval done total : %s s" %
(time.time() - tic_eval))
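The _concat_seqs helper being rewrapped above joins token sequences with separator tokens and builds matching segment IDs, the standard BERT single/pair input layout. A hedged, pure-Python sketch of that layout (the function name and token strings are illustrative):

```python
def concat_with_separators(seqs, sep='[SEP]', cls='[CLS]'):
    """Build BERT-style tokens and segment IDs from a list of sequences."""
    tokens, segment_ids = [cls], [0]
    for seg_id, seq in enumerate(seqs):
        tokens += seq + [sep]                     # close each segment
        segment_ids += [seg_id] * (len(seq) + 1)  # [SEP] joins its segment
    return tokens, segment_ids

tokens, segment_ids = concat_with_separators(
    [['a', 'nice', 'movie'], ['i', 'agree']])
print(tokens)       # ['[CLS]', 'a', 'nice', 'movie', '[SEP]', 'i', 'agree', '[SEP]']
print(segment_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]
```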
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/__init__.py
old mode 100644
new mode 100755
index 9d3123290..1f7cea92a
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/__init__.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/__init__.py
@@ -22,7 +22,8 @@
from paddleslim import pantheon
from paddleslim import dygraph
__all__ = [
- 'models', 'prune', 'nas', 'analysis', 'dist', 'quant', 'pantheon', 'dygraph'
+ 'models', 'prune', 'nas', 'analysis', 'dist', 'quant', 'pantheon',
+ 'dygraph'
]
from paddleslim.dygraph import *
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/analysis/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/analysis/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/analysis/flops.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/analysis/flops.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/analysis/latency.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/analysis/latency.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/analysis/model_size.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/analysis/model_size.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/analyze_helper.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/analyze_helper.py
old mode 100644
new mode 100755
index f5649df23..b304fbe74
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/analyze_helper.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/analyze_helper.py
@@ -133,7 +133,8 @@ def abs_max_run(self, reader, exe, step=None, loss_name=None):
program = paddle.static.CompiledProgram(
self.program).with_data_parallel(loss_name=loss_name)
for idx, data in enumerate(reader):
- vars_np = exe.run(program=program, feed=data, fetch_list=fetch_list)
+ vars_np = exe.run(
+ program=program, feed=data, fetch_list=fetch_list)
vars_np = [np.max(var) for var in vars_np]
mapped_vars_np = dict(zip(self.real_names, vars_np))
values = self.update(mapped_vars_np)
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/cached_reader.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/cached_reader.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/client.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/client.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/controller.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/controller.py
old mode 100644
new mode 100755
index 34def9b01..921e5e9ad
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/controller.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/controller.py
@@ -73,8 +73,8 @@ def load_controller(self, program, load_dir):
def get_params(self, program):
var_dict = {}
for var in program.global_block().all_parameters():
- var_dict[var.name] = np.array(paddle.static.global_scope().find_var(
- var.name).get_tensor())
+ var_dict[var.name] = np.array(paddle.static.global_scope()
+ .find_var(var.name).get_tensor())
return var_dict
def set_params(self, program, params_dict, place):
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/controller_client.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/controller_client.py
old mode 100644
new mode 100755
index ceb6ecf54..ff1fd63ec
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/controller_client.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/controller_client.py
@@ -90,7 +90,8 @@ def next_tokens(self):
(self.server_ip, self.server_port))
if errno != 0:
retry_cnt += 1
- _logger.info("Server is NOT ready, wait 10 second to retry")
+ _logger.info(
+ "Server is NOT ready, wait 10 second to retry")
time.sleep(10)
else:
break
@@ -103,7 +104,8 @@ def next_tokens(self):
else:
socket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- errno = socket_client.connect_ex((self.server_ip, self.server_port))
+ errno = socket_client.connect_ex(
+ (self.server_ip, self.server_port))
if errno != 0:
_logger.info("Server is closed")
os._exit(0)
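controller_client.py polls the server with socket.connect_ex, which returns an errno instead of raising, and sleeps between retries. A generic sketch of that polling pattern; host, port, and retry counts are placeholders:

```python
import socket
import time

def wait_for_server(ip, port, retries=3, wait=10):
    """Return True once a TCP connect succeeds, False after all retries."""
    for _ in range(retries):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        errno = sock.connect_ex((ip, port))  # 0 means the connect succeeded
        sock.close()
        if errno == 0:
            return True
        print("Server is NOT ready, wait {} second to retry".format(wait))
        time.sleep(wait)
    return False

# Example: wait_for_server('127.0.0.1', 8989)
```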
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/controller_server.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/controller_server.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/lock.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/lock.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/log_helper.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/log_helper.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/meter.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/meter.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/base_env.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/base_env.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/ddpg/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/ddpg/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/ddpg/ddpg_controller.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/ddpg/ddpg_controller.py
old mode 100644
new mode 100755
index 03138e577..95e1671fb
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/ddpg/ddpg_controller.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/ddpg/ddpg_controller.py
@@ -89,7 +89,8 @@ def __init__(self, range_tables, use_gpu=False, **kwargs):
self.obs_dim = kwargs.get('obs_dim')
self.model = kwargs.get(
'model') if 'model' in kwargs else default_ddpg_model
- self.actor_lr = kwargs.get('actor_lr') if 'actor_lr' in kwargs else 1e-4
+ self.actor_lr = kwargs.get(
+ 'actor_lr') if 'actor_lr' in kwargs else 1e-4
self.critic_lr = kwargs.get(
'critic_lr') if 'critic_lr' in kwargs else 1e-3
self.gamma = kwargs.get('gamma') if 'gamma' in kwargs else 0.99
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/ddpg/ddpg_model.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/ddpg/ddpg_model.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/ddpg/noise.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/ddpg/noise.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/lstm/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/lstm/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/lstm/lstm_controller.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/lstm/lstm_controller.py
old mode 100644
new mode 100755
index 4e521d0ae..a07858214
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/lstm/lstm_controller.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/lstm/lstm_controller.py
@@ -40,7 +40,8 @@ def __init__(self, num_layers, hidden_size):
bias_attr = ParamAttr(initializer=uniform_initializer(
1.0 / math.sqrt(hidden_size)))
for i in range(num_layers):
- self.lstm_cells.append(LSTMCell(hidden_size, param_attr, bias_attr))
+ self.lstm_cells.append(
+ LSTMCell(hidden_size, param_attr, bias_attr))
def call(self, inputs, states):
new_states = []
@@ -172,10 +173,12 @@ def _build_program(self, is_inference=False):
default_initializer=uniform_initializer(1.0))
paddle.assign(
- fluid.layers.uniform_random(shape=self.g_emb.shape), self.g_emb)
+ fluid.layers.uniform_random(shape=self.g_emb.shape),
+ self.g_emb)
hidden = fluid.data(name='hidden', shape=[None, self.hidden_size])
cell = fluid.data(name='cell', shape=[None, self.hidden_size])
- self.tokens = self._network(hidden, cell, is_inference=is_inference)
+ self.tokens = self._network(
+ hidden, cell, is_inference=is_inference)
with paddle.static.program_guard(self.learn_program):
hidden = fluid.data(name='hidden', shape=[None, self.hidden_size])
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/utils.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/utils.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/sa_controller.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/sa_controller.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/server.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/server.py
old mode 100644
new mode 100755
index a351ee478..abd34e29c
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/server.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/server.py
@@ -158,8 +158,8 @@ def run(self):
self._client.append(client_name)
self._lock.release()
- if len(self._client) == len(self._client_dict.items(
- )):
+ if len(self._client) == len(
+ self._client_dict.items()):
self._done = True
self._params_dict = sum_params_dict
del sum_params_dict
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/dygraph.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/dygraph.py
old mode 100644
new mode 100755
index 900bfddd7..396a2dd7e
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/dygraph.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/dygraph.py
@@ -121,7 +121,8 @@ def dygraph2program(layer,
# And should not create new variables in 'extract_vars'.
out_var_list = extract_outputs_fn(original_outputs)
program_desc, feed_names, fetch_names, parameters = tracer.create_program_desc(
- input_var_list, feed_prefix, out_var_list, fetch_prefix, tmp_prefix)
+ input_var_list, feed_prefix, out_var_list, fetch_prefix,
+ tmp_prefix)
tracer.reset()
with _dygraph_guard(None):
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/graph_wrapper.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/graph_wrapper.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/registry.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/registry.py
old mode 100644
new mode 100755
index 2516278b5..8d222cf01
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/registry.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/core/registry.py
@@ -27,8 +27,8 @@ def get(self, key):
def _register_module(self, module_class):
module_name = module_class.__name__
if module_name in self._module_dict:
- raise KeyError('{} is already registered in {}.'.format(module_name,
- self.name))
+ raise KeyError('{} is already registered in {}.'.format(
+ module_name, self.name))
self._module_dict[module_name] = module_class
def register(self, cls):
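The `registry.py` hunk above only reflows the `KeyError` message, but the surrounding class is the usual decorator-based registry. A minimal sketch of how such a registry is typically wired up; `_register_module` is copied from the hunk, while `register`, `get`, and the `CRITERION` usage are reconstructed assumptions, not PaddleSlim's exact code:

```python
class Registry:
    def __init__(self, name):
        self.name = name
        self._module_dict = {}

    def _register_module(self, module_class):
        # Same logic as the hunk: refuse duplicate registrations.
        module_name = module_class.__name__
        if module_name in self._module_dict:
            raise KeyError('{} is already registered in {}.'.format(
                module_name, self.name))
        self._module_dict[module_name] = module_class

    def register(self, cls):
        # Returning the class lets register() double as a decorator.
        self._register_module(cls)
        return cls

    def get(self, key):
        return self._module_dict.get(key)

CRITERION = Registry('criterion')

@CRITERION.register
class L1Norm:
    pass

assert CRITERION.get('L1Norm') is L1Norm
```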
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/filter_pruner.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/filter_pruner.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/fpgm_pruner.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/fpgm_pruner.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/l1norm_pruner.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/l1norm_pruner.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/l2norm_pruner.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/l2norm_pruner.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/pruner.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/pruner.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/pruning_plan.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/pruning_plan.py
old mode 100644
new mode 100755
index cacdcbf4b..0ef962d46
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/pruning_plan.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/pruning_plan.py
@@ -131,7 +131,8 @@ def _buffer_opt(self, param_name, sub_layer, opt):
backup_name = var_tmp.name.replace(".", "_") + "_backup"
if backup_name not in sub_layer._buffers:
sub_layer.register_buffer(
- backup_name, paddle.to_tensor(var_tmp.value().get_tensor()))
+ backup_name,
+ paddle.to_tensor(var_tmp.value().get_tensor()))
_logger.debug("Backup values of {} into buffers.".format(
var_tmp.name))
@@ -227,8 +228,9 @@ def imperative_apply(self, model, opt=None):
new_groups = int(except_num / filter_size)
sub_layer._origin_groups = sub_layer._groups
sub_layer._groups = new_groups
- _logger.info("change groups from {} to {} for {}.".
- format(groups, new_groups, param.name))
+ _logger.info(
+ "change groups from {} to {} for {}.".format(
+ groups, new_groups, param.name))
continue
# The name of a buffer cannot contain "."
@@ -265,7 +267,8 @@ def restore(self, model, opt=None):
for param in sub_layer.parameters(include_sublayers=False):
# restore optimizer accumulators from layer buffer
self._restore_opt(param.name, sub_layer, opt)
- backup_name = "_".join([param.name.replace(".", "_"), "backup"])
+ backup_name = "_".join(
+ [param.name.replace(".", "_"), "backup"])
if backup_name in sub_layer._buffers:
_logger.debug("Restore values of variable: {}".format(
param.name))
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/unstructured_pruner.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/unstructured_pruner.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/var_group.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/prune/var_group.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/quant/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/quant/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/quant/quanter.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/quant/quanter.py
old mode 100644
new mode 100755
index 9a4756577..b7698e377
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/quant/quanter.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/dygraph/quant/quanter.py
@@ -22,7 +22,8 @@
_logger = get_logger(__name__, level=logging.INFO)
WEIGHT_QUANTIZATION_TYPES = [
- 'abs_max', 'channel_wise_abs_max', 'range_abs_max', 'moving_average_abs_max'
+ 'abs_max', 'channel_wise_abs_max', 'range_abs_max',
+ 'moving_average_abs_max'
]
ACTIVATION_QUANTIZATION_TYPES = [
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/classification_models.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/classification_models.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/dygraph/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/dygraph/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/dygraph/resnet.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/dygraph/resnet.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/mobilenet.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/mobilenet.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/mobilenet_v2.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/mobilenet_v2.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/resnet.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/resnet.py
old mode 100644
new mode 100755
index 09578a629..4ceaef41e
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/resnet.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/resnet.py
@@ -138,7 +138,8 @@ def conv_bn_layer(self,
bn_name = "bn" + name[3:]
else:
if name.split("_")[1] == "conv1":
- bn_name = name.split("_", 1)[0] + "_bn_" + name.split("_", 1)[1]
+ bn_name = name.split("_", 1)[0] + "_bn_" + name.split("_",
+ 1)[1]
else:
bn_name = name.split("_", 1)[0] + "_bn" + name.split("_",
1)[1][3:]
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/slim_mobilenet.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/slim_mobilenet.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/slimfacenet.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/slimfacenet.py
old mode 100644
new mode 100755
index 47f92d670..5276a515c
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/slimfacenet.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/slimfacenet.py
@@ -297,7 +297,8 @@ def conv_bn_layer(self,
else:
return bn
- def arc_margin_product(self, input, label, out_dim, s=32.0, m=0.50, mode=2):
+ def arc_margin_product(self, input, label, out_dim, s=32.0, m=0.50,
+ mode=2):
input_norm = fluid.layers.sqrt(
fluid.layers.reduce_sum(
fluid.layers.square(input), dim=1))
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/util.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/models/util.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/architect.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/architect.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/architect_for_bert.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/architect_for_bert.py
old mode 100644
new mode 100755
index e2b878fd1..b1f6df261
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/architect_for_bert.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/architect_for_bert.py
@@ -38,9 +38,9 @@ def __init__(self, model, eta, arch_learning_rate, place, unrolled):
self.unrolled_model = self.model.new()
self.unrolled_model_params = [
p for p in self.unrolled_model.parameters()
- if p.name not in
- [a.name
- for a in self.unrolled_model.arch_parameters()] and p.trainable
+ if p.name not in [
+ a.name for a in self.unrolled_model.arch_parameters()
+ ] and p.trainable
]
self.unrolled_optimizer = fluid.optimizer.MomentumOptimizer(
self.eta,
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/get_genotype.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/get_genotype.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/cls.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/cls.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/model/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/model/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/model/cls.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/model/cls.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/batching.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/batching.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/cls.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/cls.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/pretraining.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/pretraining.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/squad.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/squad.py
old mode 100644
new mode 100755
index 79c2ca97d..651c46f96
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/squad.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/squad.py
@@ -93,7 +93,8 @@ def __init__(self,
self.is_impossible = is_impossible
-def read_squad_examples(input_file, is_training, version_2_with_negative=False):
+def read_squad_examples(input_file, is_training,
+ version_2_with_negative=False):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
@@ -218,8 +219,8 @@ def convert_examples_to_features(
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
- all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
- example.orig_answer_text)
+ all_doc_tokens, tok_start_position, tok_end_position,
+ tokenizer, example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
@@ -618,8 +619,8 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
- feature_null_score = result.start_logits[0] + result.end_logits[
- 0]
+ feature_null_score = result.start_logits[
+ 0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
@@ -638,7 +639,8 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,
continue
if end_index not in feature.token_to_orig_map:
continue
- if not feature.token_is_max_context.get(start_index, False):
+ if not feature.token_is_max_context.get(start_index,
+ False):
continue
if end_index < start_index:
continue
@@ -676,8 +678,8 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
- tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1
- )]
+ tok_tokens = feature.tokens[pred.start_index:(pred.end_index +
+ 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end +
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/tokenization.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/reader/tokenization.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/utils/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/utils/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/utils/fp16.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/utils/fp16.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/utils/init.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/utils/init.py
old mode 100644
new mode 100755
index 6b69d87e7..52f9b3808
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/utils/init.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/search_space/conv_bert/utils/init.py
@@ -116,8 +116,8 @@ def load_numpy_weight(file_name):
_param0 = load_numpy_weight("pre_encoder_layer_norm_scale")
_param1 = load_numpy_weight("pre_encoder_layer_norm_bias")
- cls_model.bert_layer.pre_process_layer._sub_layers["layer_norm_0"].set_dict(
- {
+ cls_model.bert_layer.pre_process_layer._sub_layers[
+ "layer_norm_0"].set_dict({
"weight": _param0,
"bias": _param1
})
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/train_search.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/darts/train_search.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/early_stop/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/early_stop/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/early_stop/early_stop.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/early_stop/early_stop.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/early_stop/median_stop/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/early_stop/median_stop/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/early_stop/median_stop/median_stop.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/early_stop/median_stop/median_stop.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/gp_nas.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/gp_nas.py
old mode 100644
new mode 100755
index f6963cbfc..1215ebcda
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/gp_nas.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/gp_nas.py
@@ -131,8 +131,9 @@ def get_predict_jiont(self, X, X_train, Y_train):
mat_train = self._get_cor_mat(X_train)
mat_joint = self._get_cor_mat_joint(X, X_train)
- return m_X + mat_joint * np.linalg.inv(mat_train + self.hp_mat * np.eye(
- X_train.shape[0])) * (Y_train.T - m_X_train)
+ return m_X + mat_joint * np.linalg.inv(
+ mat_train + self.hp_mat * np.eye(X_train.shape[0])) * (Y_train.T -
+ m_X_train)
def get_initial_mean(self, X, Y):
"""
@@ -174,9 +175,9 @@ def get_posterior_mean(self, X, Y):
Y.T - X * self.w)
else:
self.w = np.linalg.inv(X.T * np.linalg.inv(
- cov_mat + self.hp_mat * np.eye(X.shape[0])) * X + np.linalg.inv(
- self.cov_w + self.hp_mat * np.eye(X.shape[
- 1])) + self.hp_mat * np.eye(X.shape[1])) * (
+ cov_mat + self.hp_mat * np.eye(X.shape[
+ 0])) * X + np.linalg.inv(self.cov_w + self.hp_mat * np.eye(
+ X.shape[1])) + self.hp_mat * np.eye(X.shape[1])) * (
X.T * np.linalg.inv(cov_mat + self.hp_mat * np.eye(
X.shape[0])) * Y.T +
np.linalg.inv(self.cov_w + self.hp_mat * np.eye(
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/convert_super.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/convert_super.py
old mode 100644
new mode 100755
index b08116f1b..c0f7ad401
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/convert_super.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/convert_super.py
@@ -127,7 +127,9 @@ def convert(self, network):
'stride', 'padding', 'dilation', 'groups', 'bias_attr'
]
if pd_ver == 185:
- new_attr_name += ['param_attr', 'use_cudnn', 'act', 'dtype']
+ new_attr_name += [
+ 'param_attr', 'use_cudnn', 'act', 'dtype'
+ ]
else:
new_attr_name += [
'weight_attr', 'data_format', 'padding_mode'
@@ -241,10 +243,10 @@ def convert(self, network):
layer = Block(SuperGroupConv2D(**new_attr_dict), key=key)
model[idx] = layer
- elif isinstance(layer,
- getattr(nn, 'BatchNorm2D', nn.BatchNorm)) and (
- getattr(self.context, 'expand', None) != None or
- getattr(self.context, 'channel', None) != None):
+ elif isinstance(
+ layer, getattr(nn, 'BatchNorm2D', nn.BatchNorm)) and (
+ getattr(self.context, 'expand', None) != None or
+ getattr(self.context, 'channel', None) != None):
# num_features in BatchNorm doesn't change after the last weight operator
if idx > last_weight_layer_idx:
continue
@@ -254,8 +256,9 @@ def convert(self, network):
if pd_ver == 185:
new_attr_name += [
- 'param_attr', 'act', 'dtype', 'in_place', 'data_layout',
- 'is_test', 'use_global_stats', 'trainable_statistics'
+ 'param_attr', 'act', 'dtype', 'in_place',
+ 'data_layout', 'is_test', 'use_global_stats',
+ 'trainable_statistics'
]
else:
new_attr_name += ['weight_attr', 'data_format', 'name']
@@ -286,7 +289,8 @@ def convert(self, network):
layer = layers.SuperBatchNorm(
**new_attr_dict
- ) if pd_ver == 185 else layers.SuperBatchNorm2D(**new_attr_dict)
+ ) if pd_ver == 185 else layers.SuperBatchNorm2D(**
+ new_attr_dict)
model[idx] = layer
elif isinstance(layer, SyncBatchNorm) and (
@@ -342,7 +346,8 @@ def convert(self, network):
if pd_ver == 185:
new_attr_name += [
- 'output_size', 'param_attr', 'use_cudnn', 'act', 'dtype'
+ 'output_size', 'param_attr', 'use_cudnn', 'act',
+ 'dtype'
]
else:
new_attr_name += [
@@ -456,7 +461,8 @@ def convert(self, network):
'channel'] = cur_channel
new_attr_dict['groups'] = new_attr_dict[in_key[1:]]
layer = Block(
- SuperDepthwiseConv2DTranspose(**new_attr_dict), key=key)
+ SuperDepthwiseConv2DTranspose(**new_attr_dict),
+ key=key)
else:
### group conv_transpose
layer = Block(
@@ -533,10 +539,11 @@ def convert(self, network):
elif isinstance(
layer,
- getattr(nn, 'InstanceNorm2D',
- paddle.fluid.dygraph.nn.InstanceNorm)) and (
- getattr(self.context, 'expand', None) != None or
- getattr(self.context, 'channel', None) != None):
+ getattr(
+ nn, 'InstanceNorm2D',
+ paddle.fluid.dygraph.nn.InstanceNorm)) and (
+ getattr(self.context, 'expand', None) != None or
+ getattr(self.context, 'channel', None) != None):
# num_features in InstanceNorm doesn't change after the last weight operator
if idx > last_weight_layer_idx:
continue
@@ -558,8 +565,8 @@ def convert(self, network):
new_key = '_num_channels' if '_num_channels' in new_attr_dict.keys(
) else '_num_features'
### 10 is a default channel count for the case of weight_attr=False; in this condition the number of channels is unused, so it can be set arbitrarily.
- attr_dict[new_key] = layer._parameters['scale'].shape[0] if len(
- layer._parameters) != 0 else 10
+ attr_dict[new_key] = layer._parameters['scale'].shape[
+ 0] if len(layer._parameters) != 0 else 10
if self.context.expand:
new_attr_dict[new_key[1:]] = int(self.context.expand *
@@ -601,7 +608,8 @@ def convert(self, network):
new_attr_dict['normalized_shape'] = None
if self.context.expand:
new_attr_dict['normalized_shape'] = int(
- self.context.expand * attr_dict['_normalized_shape'][0])
+ self.context.expand *
+ attr_dict['_normalized_shape'][0])
elif self.context.channel:
new_attr_dict['normalized_shape'] = max(cur_channel)
else:
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/get_sub_model.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/get_sub_model.py
old mode 100644
new mode 100755
index 66fdd2c0b..ad5cad13f
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/get_sub_model.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/get_sub_model.py
@@ -77,8 +77,8 @@ def get_prune_params_config(graph, origin_model_config):
else:
if len(next_inp._var.
shape) > 1 and precedor != None:
- param_config[
- next_inp._var.name] = [precedor, None]
+ param_config[next_inp._var.
+ name] = [precedor, None]
else:
param_config[next_inp._var.name] = [precedor]
@@ -106,7 +106,8 @@ def prune_params(model, param_config, super_model_sd=None):
for l_name, sublayer in model.named_sublayers():
if isinstance(sublayer, BaseBlock):
continue
- for p_name, param in sublayer.named_parameters(include_sublayers=False):
+ for p_name, param in sublayer.named_parameters(
+ include_sublayers=False):
t_value = param.value().get_tensor()
value = np.array(t_value).astype("float32")
@@ -167,7 +168,8 @@ def _is_depthwise(op):
for inp in op.all_inputs():
if inp._var.persistable and (
op.attr('groups') == inp._var.shape[0] and
- op.attr('groups') * inp._var.shape[1] == inp._var.shape[0]):
+ op.attr('groups') * inp._var.shape[1] == inp._var.shape[0]
+ ):
return True
return False
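The `_is_depthwise` hunk above tests `op.attr('groups') == inp._var.shape[0]` together with `op.attr('groups') * inp._var.shape[1] == inp._var.shape[0]`. For a conv weight of shape `(out_c, in_c // groups, kh, kw)`, a depthwise conv has `groups == out_c` and `in_c // groups == 1`, which is exactly what the two comparisons capture. A minimal standalone sketch of the same check:

```python
# Depthwise test on a conv weight of shape (out_c, in_c // groups, kh, kw):
# groups == out_c and groups * (in_c // groups) == out_c hold together
# only when every input channel gets its own filter.
def is_depthwise(groups, weight_shape):
    return (groups == weight_shape[0] and
            groups * weight_shape[1] == weight_shape[0])

print(is_depthwise(32, (32, 1, 3, 3)))   # True: depthwise 3x3 conv
print(is_depthwise(1, (32, 16, 3, 3)))   # False: ordinary conv
```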
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/layers.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/layers.py
old mode 100644
new mode 100755
index a0b1fe0b0..d274c54a4
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/layers.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/layers.py
@@ -190,7 +190,8 @@ def __init__(self,
'channel'] if 'channel' in candidate_config else None
self.base_channel = self._out_channels
if self.expand_ratio != None:
- self.base_channel = int(self._out_channels / max(self.expand_ratio))
+ self.base_channel = int(self._out_channels /
+ max(self.expand_ratio))
self.transform_kernel = transform_kernel
if self.ks_set != None:
@@ -239,7 +240,8 @@ def get_active_filter(self, in_nc, out_nc, kernel_size):
_input_filter = paddle.reshape(
_input_filter,
shape=[
- filters.shape[0], filters.shape[1], target_ks, target_ks
+ filters.shape[0], filters.shape[1], target_ks,
+ target_ks
])
start_filter = _input_filter
filters = start_filter
@@ -265,7 +267,8 @@ def get_groups_in_out_nc(self, in_nc, out_nc):
in_nc = int(in_nc // groups)
return groups, in_nc, out_nc
- def forward(self, input, kernel_size=None, expand_ratio=None, channel=None):
+ def forward(self, input, kernel_size=None, expand_ratio=None,
+ channel=None):
"""
Parameters:
input(Tensor): Input tensor.
@@ -296,7 +299,8 @@ def forward(self, input, kernel_size=None, expand_ratio=None, channel=None):
weight = self.get_active_filter(weight_in_nc, weight_out_nc, ks)
- if kernel_size != None or 'kernel_size' in self.candidate_config.keys():
+ if kernel_size != None or 'kernel_size' in self.candidate_config.keys(
+ ):
padding = convert_to_list(get_same_padding(ks), 2)
else:
padding = self._padding
@@ -493,7 +497,8 @@ def __init__(self,
'channel'] if 'channel' in candidate_config else None
self.base_channel = self._out_channels
if self.expand_ratio:
- self.base_channel = int(self._out_channels / max(self.expand_ratio))
+ self.base_channel = int(self._out_channels /
+ max(self.expand_ratio))
self.transform_kernel = transform_kernel
if self.ks_set != None:
@@ -540,7 +545,8 @@ def get_active_filter(self, in_nc, out_nc, kernel_size):
_input_filter = paddle.reshape(
_input_filter,
shape=[
- filters.shape[0], filters.shape[1], target_ks, target_ks
+ filters.shape[0], filters.shape[1], target_ks,
+ target_ks
])
start_filter = _input_filter
filters = start_filter
@@ -604,7 +610,8 @@ def forward(self,
weight = self.get_active_filter(weight_in_nc, weight_out_nc, ks)
- if kernel_size != None or 'kernel_size' in self.candidate_config.keys():
+ if kernel_size != None or 'kernel_size' in self.candidate_config.keys(
+ ):
padding = convert_to_list(get_same_padding(ks), 2)
else:
padding = self._padding
@@ -997,11 +1004,13 @@ def forward(self, input):
variance_out = variance
attrs = ("momentum", self._momentum, "epsilon", self._epsilon,
- "is_test", not self.training, "data_layout", self._data_format,
- "use_mkldnn", False, "fuse_with_relu", False,
- "use_global_stats", False, 'trainable_statistics', False)
+ "is_test", not self.training, "data_layout",
+ self._data_format, "use_mkldnn", False, "fuse_with_relu",
+ False, "use_global_stats", False, 'trainable_statistics',
+ False)
sync_batch_norm_out, _, _, _, _, _ = core.ops.sync_batch_norm(
- input, weight, bias, mean, variance, mean_out, variance_out, *attrs)
+ input, weight, bias, mean, variance, mean_out, variance_out,
+ *attrs)
return sync_batch_norm_out
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/layers_base.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/layers_base.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/layers_old.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/layers_old.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/ofa.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/ofa.py
old mode 100644
new mode 100755
index 477f815ab..841177928
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/ofa.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/ofa.py
@@ -283,7 +283,8 @@ def _prepare_distill(self):
sublayer._out_channels),
getattr(sublayer, '_num_filters',
sublayer._out_channels), 1)
- elif self.distill_config.mapping_op.lower() == 'linear':
+ elif self.distill_config.mapping_op.lower(
+ ) == 'linear':
netA = SuperLinear(
getattr(sublayer, '_output_dim',
sublayer._out_features),
@@ -346,8 +347,8 @@ def _sample_from_nestdict(self, cands, sample_type, task, phase):
if isinstance(v, dict):
sample_cands[k] = self._sample_from_nestdict(
v, sample_type=sample_type, task=task, phase=phase)
- elif isinstance(v, list) or isinstance(v, set) or isinstance(v,
- tuple):
+ elif isinstance(v, list) or isinstance(v, set) or isinstance(
+ v, tuple):
if sample_type == 'largest':
sample_cands[k] = v[-1]
elif sample_type == 'smallest':
@@ -486,7 +487,8 @@ def _export_sub_model_config(self, origin_model, config, input_shapes,
if name in config.keys():
origin_model_config[param.name] = config[name]
- param_prune_config = get_prune_params_config(graph, origin_model_config)
+ param_prune_config = get_prune_params_config(graph,
+ origin_model_config)
return param_prune_config
def export(self,
@@ -518,8 +520,8 @@ def export(self,
origin_model = self.model
origin_model = origin_model._layers if isinstance(
origin_model, DataParallel) else origin_model
- param_config = self._export_sub_model_config(origin_model, config,
- input_shapes, input_dtypes)
+ param_config = self._export_sub_model_config(
+ origin_model, config, input_shapes, input_dtypes)
prune_params(origin_model, param_config, super_sd)
return origin_model
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/utils/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/utils/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/utils/nlp_utils.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/utils/nlp_utils.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/utils/special_config.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/utils/special_config.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/utils/utils.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/ofa/utils/utils.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/one_shot/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/one_shot/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/one_shot/one_shot_nas.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/one_shot/one_shot_nas.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/one_shot/super_mnasnet.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/one_shot/super_mnasnet.py
old mode 100644
new mode 100755
index 920f5d86e..852b40383
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/one_shot/super_mnasnet.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/one_shot/super_mnasnet.py
@@ -209,14 +209,14 @@ def __init__(self,
def init_tokens(self):
return [
- 3, 3, 6, 6, 6, 6, 3, 3, 3, 6, 6, 6, 3, 3, 3, 3, 6, 6, 3, 3, 3, 6, 6,
- 6, 3, 3, 3, 6, 6, 6, 3, 6, 6, 6, 6, 6
+ 3, 3, 6, 6, 6, 6, 3, 3, 3, 6, 6, 6, 3, 3, 3, 3, 6, 6, 3, 3, 3, 6,
+ 6, 6, 3, 3, 3, 6, 6, 6, 3, 6, 6, 6, 6, 6
]
def range_table(self):
max_v = [
- 6, 6, 10, 10, 10, 10, 6, 6, 6, 10, 10, 10, 6, 6, 6, 6, 10, 10, 6, 6,
- 6, 10, 10, 10, 6, 6, 6, 10, 10, 10, 6, 10, 10, 10, 10, 10
+ 6, 6, 10, 10, 10, 10, 6, 6, 6, 10, 10, 10, 6, 6, 6, 6, 10, 10, 6,
+ 6, 6, 10, 10, 10, 6, 6, 6, 10, 10, 10, 6, 10, 10, 10, 10, 10
]
return (len(max_v) * [0], max_v)
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/rl_nas.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/rl_nas.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/sa_nas.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/sa_nas.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/base_layer.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/base_layer.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/combine_search_space.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/combine_search_space.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/darts_space.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/darts_space.py
old mode 100644
new mode 100755
index 29e91b5dd..ae2c5e3ec
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/darts_space.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/darts_space.py
@@ -160,7 +160,8 @@ def _cell(self,
if reduction_prev:
s0 = self._factorized_reduce(s0, filter_num, name=name + '/s-2')
else:
- s0 = self._relu_conv_bn(s0, filter_num, 1, 1, 0, name=name + '/s-2')
+ s0 = self._relu_conv_bn(
+ s0, filter_num, 1, 1, 0, name=name + '/s-2')
s1 = self._relu_conv_bn(s1, filter_num, 1, 1, 0, name=name + '/s-1')
if stride == 1:
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/inception_block.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/inception_block.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenet_block.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenet_block.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenetv1.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenetv1.py
old mode 100644
new mode 100755
index 263685cc8..28928a8cc
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenetv1.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenetv1.py
@@ -52,7 +52,8 @@ def __init__(self, input_size, output_size, block_num, block_mask):
[128, 144, 160, 192, 224, 256, 320, 384, 512, 576, 640, 704,
768]) #13
self.filter_num9 = np.array(
- [160, 192, 224, 256, 320, 384, 512, 640, 768, 832, 1024, 1048]) #12
+ [160, 192, 224, 256, 320, 384, 512, 640, 768, 832, 1024,
+ 1048]) #12
# self.k_size means kernel size
self.k_size = np.array([3, 5]) #2
# self.repeat means repeat_num in the fourth downsample
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenetv2.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenetv2.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/resnet.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/resnet.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/resnet_block.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/resnet_block.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/search_space_base.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/search_space_base.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/search_space_factory.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/search_space_factory.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/search_space_registry.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/search_space_registry.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/utils.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/utils.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/README.md b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/README.md
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/images/pantheon_arch.png b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/images/pantheon_arch.png
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/student.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/student.py
old mode 100644
new mode 100755
index 771e1e533..72bdd5e14
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/student.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/student.py
@@ -82,8 +82,9 @@ def register_teacher(self, in_path=None, in_address=None):
":" (e.g. "127.0.0.1:8080"). Default None.
"""
if self._started:
- raise ValueError("The student has been started and cannot register "
- "teacher no longer!")
+ raise ValueError(
+            "The student has already started and can no longer "
+            "register teachers!")
if in_path and in_address:
raise ValueError("Input path and input address should not "
"be given at the same time!")
@@ -247,7 +248,8 @@ def get_cmd():
knowledge_queue = Queue.Queue(100)
cmd_queue = Queue.Queue(5)
p = Thread(
- target=read_offline, args=(in_path, cmd_queue, knowledge_queue))
+ target=read_offline,
+ args=(in_path, cmd_queue, knowledge_queue))
p.daemon = True
p.start()
@@ -373,7 +375,8 @@ def get_knowledge_desc(self):
"teachers not in the merge_strategy.".format(idx))
knowledge_desc.update(desc)
- print("Knowledge merging strategy: {}".format(self._merge_strategy))
+ print("Knowledge merging strategy: {}".format(
+ self._merge_strategy))
print("Knowledge description after merging:")
for schema, desc in list(knowledge_desc.items()):
print("{}: {}".format(schema, desc))
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/teacher.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/teacher.py
old mode 100644
new mode 100755
index be24de193..8bf8910c5
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/teacher.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/teacher.py
@@ -517,8 +517,8 @@ def cast2fp16(know):
schema_in_feed[k] = v
else:
schema_in_fetch[k] = v
- schema_in_fetch_keys, schema_in_fetch_vars = zip(
- *list(schema_in_fetch.items()))
+ schema_in_fetch_keys, schema_in_fetch_vars = zip(*list(
+ schema_in_fetch.items()))
def know_maker(in_queue, out_queue, use_fp16):
while True:
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/utils.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/pantheon/utils.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/auto_pruner.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/auto_pruner.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/collections.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/collections.py
old mode 100644
new mode 100755
index 55888121f..93c9c9341
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/collections.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/collections.py
@@ -39,8 +39,9 @@ class PruningDetails(object):
"""
def __init__(self, var, axis, transform, op, is_parameter=True):
- assert (isinstance(var, VarWrapper),
- "name should be VarWrapper, but get type = ".format(type(var)))
+        assert isinstance(var, VarWrapper), (
+            "var should be a VarWrapper, but got type = {}".format(type(var)))
assert (isinstance(axis, int))
self.name = var.name()
self.var = var
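Worth flagging the bug that the rewritten `assert` above removes: wrapping the condition and the message in a single pair of parentheses builds a 2-tuple, and a non-empty tuple is always truthy, so the original check could never fire (the message string was also missing its `{}` placeholder). A minimal sketch of the pitfall:

```python
# A parenthesized (condition, message) pair is one truthy tuple, so this
# assertion passes silently (CPython 3.8+ emits a SyntaxWarning about it).
assert (1 == 2, "never raised")

# Correct form: bare condition first, message after the comma.
# assert 1 == 2, "raised with this message"
```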
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/criterion.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/criterion.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/idx_selector.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/idx_selector.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/prune_io.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/prune_io.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/prune_worker.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/prune_worker.py
old mode 100644
new mode 100755
index edbb33f04..ccd29b755
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/prune_worker.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/prune_worker.py
@@ -134,8 +134,9 @@ def _prune_op(self, op, var, pruned_axis, pruned_idx, visited=None):
raise UnsupportOpError("Unsupported operator named {}".format(
op.type()))
- _logger.debug("\nfrom: {}\nto: {}\npruned_axis: {}; var: {}\ntrans: {}".
- format(self.op, op, pruned_axis, var.name(), pruned_idx))
+ _logger.debug(
+ "\nfrom: {}\nto: {}\npruned_axis: {}; var: {}\ntrans: {}".format(
+ self.op, op, pruned_axis, var.name(), pruned_idx))
_logger.debug(
f"visit {op.type()} by var [{var.name()}] on axis [{pruned_axis}];\t visited={self.visited}\n"
)
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/pruner.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/pruner.py
old mode 100644
new mode 100755
index 1d01aeff6..bb35fc0e3
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/pruner.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/pruner.py
@@ -37,7 +37,8 @@ class Pruner():
"""
- def __init__(self, criterion="l1_norm",
+ def __init__(self,
+ criterion="l1_norm",
idx_selector="default_idx_selector"):
if isinstance(criterion, str):
self.criterion = CRITERION.get(criterion)
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/sensitive.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/sensitive.py
old mode 100644
new mode 100755
index 33a9934b3..411ea0caf
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/sensitive.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/sensitive.py
@@ -26,7 +26,8 @@
_logger = get_logger(__name__, level=logging.INFO)
__all__ = [
- "sensitivity", "load_sensitivities", "merge_sensitive", "get_ratios_by_loss"
+ "sensitivity", "load_sensitivities", "merge_sensitive",
+ "get_ratios_by_loss"
]
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/unstructured_pruner.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/unstructured_pruner.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/quant/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/quant/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/bert.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/bert.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/cls.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/cls.py
old mode 100644
new mode 100755
index 1300d7c30..bdfef8b5b
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/cls.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/cls.py
@@ -78,8 +78,8 @@ def forward(self, data_ids):
input_mask = data_ids[3]
labels = data_ids[4]
- enc_outputs, next_sent_feats = self.bert_layer(src_ids, position_ids,
- sentence_ids, input_mask)
+ enc_outputs, next_sent_feats = self.bert_layer(
+ src_ids, position_ids, sentence_ids, input_mask)
if not self.return_pooled_out:
cls_feat = fluid.layers.dropout(
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/transformer_encoder.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/transformer_encoder.py
old mode 100644
new mode 100755
index 942bf5f18..a4b89f73d
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/transformer_encoder.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/model/transformer_encoder.py
@@ -37,7 +37,8 @@ def __init__(self, process_cmd, d_model, dropout_rate, name):
for cmd in self.process_cmd:
if cmd == "a": # add residual connection
- self.functors.append(lambda x, y: x + y if y is not None else x)
+ self.functors.append(
+ lambda x, y: x + y if y is not None else x)
self.exec_order += "a"
elif cmd == "n": # add layer normalization
self.functors.append(
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/reader/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/reader/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/reader/batching.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/reader/batching.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/reader/cls.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/reader/cls.py
old mode 100644
new mode 100755
index 04bd49566..cfdfaf38b
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/reader/cls.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/reader/cls.py
@@ -239,18 +239,20 @@ def split_seq_pair(self, data_ids):
ids_0 = ids[((sids == 0) & (ids != 0))]
seqlen_0 = ((sids == 0) & (ids != 0)).astype(np.int64).sum(1)
y_0 = np.concatenate([np.arange(s) for s in seqlen_0])
- x_0 = np.concatenate(
- [np.ones(
- [s], dtype=np.int64) * i for i, s in enumerate(seqlen_0)])
+ x_0 = np.concatenate([
+ np.ones(
+ [s], dtype=np.int64) * i for i, s in enumerate(seqlen_0)
+ ])
ids0 = np.zeros([batchsize, seqlen_0.max()], dtype=np.int64)
ids0[(x_0, y_0)] = ids_0
ids_1 = ids[(sids == 1) & (ids != 0)]
seqlen_1 = ((sids == 1) & (ids != 0)).astype(np.int64).sum(1)
y_1 = np.concatenate([np.arange(s) for s in seqlen_1])
- x_1 = np.concatenate(
- [np.ones(
- [s], dtype=np.int64) * i for i, s in enumerate(seqlen_1)])
+ x_1 = np.concatenate([
+ np.ones(
+ [s], dtype=np.int64) * i for i, s in enumerate(seqlen_1)
+ ])
ids1 = np.zeros([batchsize, seqlen_1.max()], dtype=np.int64)
ids1[(x_1, y_1)] = ids_1
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/reader/tokenization.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/reader/tokenization.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/utils/__init__.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/utils/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/utils/fp16.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/utils/fp16.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/utils/init.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/utils/init.py
old mode 100644
new mode 100755
index 6b69d87e7..52f9b3808
--- a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/utils/init.py
+++ b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/teachers/bert/utils/init.py
@@ -116,8 +116,8 @@ def load_numpy_weight(file_name):
_param0 = load_numpy_weight("pre_encoder_layer_norm_scale")
_param1 = load_numpy_weight("pre_encoder_layer_norm_bias")
- cls_model.bert_layer.pre_process_layer._sub_layers["layer_norm_0"].set_dict(
- {
+ cls_model.bert_layer.pre_process_layer._sub_layers[
+ "layer_norm_0"].set_dict({
"weight": _param0,
"bias": _param1
})
diff --git a/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/version.py b/transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/version.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/README.md b/transformer_courses/BERT_distillation/README.md
old mode 100644
new mode 100755
diff --git a/transformer_courses/BERT_distillation/README_en.md b/transformer_courses/BERT_distillation/README_en.md
old mode 100644
new mode 100755
diff --git a/transformer_courses/README.md b/transformer_courses/README.md
old mode 100644
new mode 100755
diff --git a/transformer_courses/Transformer_Machine_Translation/README.md b/transformer_courses/Transformer_Machine_Translation/README.md
old mode 100644
new mode 100755
index 08e7221eb..ec90cc22a
--- a/transformer_courses/Transformer_Machine_Translation/README.md
+++ b/transformer_courses/Transformer_Machine_Translation/README.md
@@ -69,4 +69,4 @@ Transformer is a classic NLP work proposed by the Google team in June 2017, by As
```bash
python predict.py
- ```
\ No newline at end of file
+ ```
diff --git a/transformer_courses/Transformer_Machine_Translation/README_en.md b/transformer_courses/Transformer_Machine_Translation/README_en.md
old mode 100644
new mode 100755
index eca13922e..0b9f63cf3
--- a/transformer_courses/Transformer_Machine_Translation/README_en.md
+++ b/transformer_courses/Transformer_Machine_Translation/README_en.md
@@ -71,4 +71,4 @@ The model prediction process can be started as follows
```bash
python predict.py
- ```
\ No newline at end of file
+ ```
diff --git a/transformer_courses/Transformer_Machine_Translation/bpe_process.py b/transformer_courses/Transformer_Machine_Translation/bpe_process.py
old mode 100644
new mode 100755
index 864a8fcf1..1e6619092
--- a/transformer_courses/Transformer_Machine_Translation/bpe_process.py
+++ b/transformer_courses/Transformer_Machine_Translation/bpe_process.py
@@ -14,40 +14,32 @@
import jieba
+
# 中文Jieba分词
-def jieba_cut(in_file,out_file):
- out_f = open(out_file,'w',encoding='utf8')
- with open(in_file,'r',encoding='utf8') as f:
+def jieba_cut(in_file, out_file):
+ out_f = open(out_file, 'w', encoding='utf8')
+ with open(in_file, 'r', encoding='utf8') as f:
for line in f.readlines():
line = line.strip()
if not line:
continue
cut_line = ' '.join(jieba.cut(line))
- out_f.write(cut_line+'\n')
+ out_f.write(cut_line + '\n')
out_f.close()
-zn_dir='zh-en/train.tags.zh-en.zh.txt'
-cut_zn_dir='zh-en/train.tags.zh-en.zh.cut.txt'
-jieba_cut(zn_dir,cut_zn_dir)
-
-zn_dir='dev_cn.txt'
-cut_zn_dir='dev_cn.cut.txt'
-jieba_cut(zn_dir,cut_zn_dir)
-
-zn_dir='dev_cn.txt'
-cut_zn_dir='dev_cn.cut.txt'
-jieba_cut(zn_dir,cut_zn_dir)
-
-zn_dir='test_cn.txt'
-cut_zn_dir='test_cn.cut.txt'
-jieba_cut(zn_dir,cut_zn_dir)
-
-
-
-
-
-
+zn_dir = 'zh-en/train.tags.zh-en.zh.txt'
+cut_zn_dir = 'zh-en/train.tags.zh-en.zh.cut.txt'
+jieba_cut(zn_dir, cut_zn_dir)
+zn_dir = 'dev_cn.txt'
+cut_zn_dir = 'dev_cn.cut.txt'
+jieba_cut(zn_dir, cut_zn_dir)
+zn_dir = 'dev_cn.txt'
+cut_zn_dir = 'dev_cn.cut.txt'
+jieba_cut(zn_dir, cut_zn_dir)
+zn_dir = 'test_cn.txt'
+cut_zn_dir = 'test_cn.cut.txt'
+jieba_cut(zn_dir, cut_zn_dir)
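The tail of bpe_process.py repeats the same three-line pattern for each file (and processes dev_cn.txt twice, which is redundant). A compact equivalent, assuming the same input files produced earlier by data_process.py, might look like:

```python
import jieba

def jieba_cut(in_file, out_file):
    # Same behaviour as above: segment each non-empty line with jieba,
    # join the tokens with spaces, write one sentence per line.
    with open(in_file, 'r', encoding='utf8') as f, \
         open(out_file, 'w', encoding='utf8') as out_f:
        for line in f:
            line = line.strip()
            if line:
                out_f.write(' '.join(jieba.cut(line)) + '\n')

# The repeated assignments collapse into one table of (source, output) pairs.
for src, dst in [
        ('zh-en/train.tags.zh-en.zh.txt', 'zh-en/train.tags.zh-en.zh.cut.txt'),
        ('dev_cn.txt', 'dev_cn.cut.txt'),
        ('test_cn.txt', 'test_cn.cut.txt'),
]:
    jieba_cut(src, dst)
```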
diff --git a/transformer_courses/Transformer_Machine_Translation/bpe_process2.py b/transformer_courses/Transformer_Machine_Translation/bpe_process2.py
old mode 100644
new mode 100755
index b082e3e40..4dfbed07d
--- a/transformer_courses/Transformer_Machine_Translation/bpe_process2.py
+++ b/transformer_courses/Transformer_Machine_Translation/bpe_process2.py
@@ -12,27 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-special_token=['<s>','<e>','<unk>']
-cn_vocab=[]
+special_token = ['<s>', '<e>', '<unk>']
+cn_vocab = []
with open('zh-en/temp1') as f:
for item in f.readlines():
- words=item.strip().split()
+ words = item.strip().split()
cn_vocab.append(words[0])
-with open('zh-en/vocab.ch.src','w') as f:
+with open('zh-en/vocab.ch.src', 'w') as f:
for item in special_token:
- f.write(item+'\n')
+ f.write(item + '\n')
for item in cn_vocab:
- f.write(item+'\n')
+ f.write(item + '\n')
-eng_vocab=[]
+eng_vocab = []
with open('zh-en/temp2') as f:
for item in f.readlines():
- words=item.strip().split()
+ words = item.strip().split()
eng_vocab.append(words[0])
-with open('zh-en/vocab.en.tgt','w') as f:
+with open('zh-en/vocab.en.tgt', 'w') as f:
for item in special_token:
- f.write(item+'\n')
+ f.write(item + '\n')
for item in eng_vocab:
- f.write(item+'\n')
+ f.write(item + '\n')
diff --git a/transformer_courses/Transformer_Machine_Translation/data_process.py b/transformer_courses/Transformer_Machine_Translation/data_process.py
old mode 100644
new mode 100755
index dfe2c2818..3379b0a40
--- a/transformer_courses/Transformer_Machine_Translation/data_process.py
+++ b/transformer_courses/Transformer_Machine_Translation/data_process.py
@@ -12,54 +12,52 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
from tqdm import tqdm
import os
import xml.etree.ElementTree as ET
+def filter_out_html(filename1, filename2):
+ f1 = open(filename1, 'r')
+ f2 = open(filename2, 'r')
+ data1 = f1.readlines()
+ data2 = f2.readlines()
+ assert len(data1) == len(data2) #用codecs会导致报错不知道为什么
+ fw1 = open(filename1 + ".txt", 'w')
+ fw2 = open(filename2 + ".txt", 'w')
-def filter_out_html(filename1,filename2):
- f1 = open(filename1,'r')
- f2 = open(filename2,'r')
-
- data1 = f1.readlines()
- data2 = f2.readlines()
- assert len(data1)==len(data2)#用codecs会导致报错不知道为什么
- fw1 = open(filename1+".txt",'w')
- fw2 = open(filename2+".txt",'w')
+ for line1, line2 in tqdm(zip(data1, data2)):
+ line1 = line1.strip()
+ line2 = line2.strip()
+ if line1 and line2:
+ if '<' not in line1 and '>' not in line1 and '<' not in line2 and '>' not in line2:
+ fw1.write(line1 + "\n")
+ fw2.write(line2 + "\n")
+ fw1.close()
+ f1.close()
+ fw2.close()
+ f2.close()
- for line1,line2 in tqdm(zip(data1,data2)):
- line1 = line1.strip()
- line2 = line2.strip()
- if line1 and line2:
- if '<' not in line1 and '>' not in line1 and '<' not in line2 and '>' not in line2:
- fw1.write(line1+"\n")
- fw2.write(line2+"\n")
- fw1.close()
- f1.close()
- fw2.close()
- f2.close()
+ return filename1 + ".txt", filename2 + ".txt"
- return filename1+".txt",filename2+".txt"
-en_dir='zh-en/train.tags.zh-en.en'
-zn_dir='zh-en/train.tags.zh-en.zh'
-filter_out_html(en_dir,zn_dir)
+en_dir = 'zh-en/train.tags.zh-en.en'
+zn_dir = 'zh-en/train.tags.zh-en.zh'
+filter_out_html(en_dir, zn_dir)
tree_source_dev = ET.parse('zh-en/IWSLT15.TED.dev2010.zh-en.zh.xml')
tree_source_dev = [seg.text for seg in tree_source_dev.iter('seg')]
tree_target_dev = ET.parse('zh-en/IWSLT15.TED.dev2010.zh-en.en.xml')
tree_target_dev = [seg.text for seg in tree_target_dev.iter('seg')]
-with open('dev_cn.txt','w') as f:
+with open('dev_cn.txt', 'w') as f:
for item in tree_source_dev:
- f.write(item+'\n')
+ f.write(item + '\n')
-with open('dev_en.txt','w') as f:
+with open('dev_en.txt', 'w') as f:
for item in tree_target_dev:
- f.write(item+'\n')
+ f.write(item + '\n')
tree_source_test = ET.parse('zh-en/IWSLT15.TED.tst2011.zh-en.zh.xml')
tree_source_test = [seg.text for seg in tree_source_test.iter('seg')]
@@ -67,13 +65,10 @@ def filter_out_html(filename1,filename2):
tree_target_test = ET.parse('zh-en/IWSLT15.TED.tst2011.zh-en.en.xml')
tree_target_test = [seg.text for seg in tree_target_test.iter('seg')]
-with open('test_cn.txt','w') as f:
+with open('test_cn.txt', 'w') as f:
for item in tree_source_test:
- f.write(item+'\n')
+ f.write(item + '\n')
-with open('test_en.txt','w') as f:
+with open('test_en.txt', 'w') as f:
for item in tree_target_test:
- f.write(item+'\n')
-
-
-
+ f.write(item + '\n')
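data_process.py pulls sentence segments out of the IWSLT XML files with `tree.iter('seg')`. A minimal, self-contained illustration of that extraction pattern on an in-memory document (hypothetical content, not from the dataset):

```python
import xml.etree.ElementTree as ET

# In-memory stand-in for an IWSLT dev/test file (hypothetical content).
xml_text = """<mteval><srcset>
  <doc docid="1"><seg id="1">第一句。</seg><seg id="2">第二句。</seg></doc>
</srcset></mteval>"""

root = ET.fromstring(xml_text)
segs = [seg.text for seg in root.iter('seg')]  # same pattern as the script above
print(segs)  # ['第一句。', '第二句。']
```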
diff --git a/transformer_courses/Transformer_Machine_Translation/dataloader.py b/transformer_courses/Transformer_Machine_Translation/dataloader.py
old mode 100644
new mode 100755
index e24e7eea6..52cbaadee
--- a/transformer_courses/Transformer_Machine_Translation/dataloader.py
+++ b/transformer_courses/Transformer_Machine_Translation/dataloader.py
@@ -1,4 +1,3 @@
-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,6 +21,7 @@
from paddlenlp.data.sampler import SamplerHelper
from paddlenlp.datasets import load_dataset
+
def min_max_filer(data, max_len, min_len=0):
# 1 for special tokens.
data_min_len = min(len(data[0]), len(data[1])) + 1
@@ -36,9 +36,11 @@ def read(src_path, tgt_path, is_predict=False):
src_line = src_line.strip()
if not src_line:
continue
- yield {'src':src_line, 'tgt':''}
+ yield {'src': src_line, 'tgt': ''}
else:
- with open(src_path, 'r', encoding='utf8') as src_f, open(tgt_path, 'r', encoding='utf8') as tgt_f:
+ with open(
+ src_path, 'r', encoding='utf8') as src_f, open(
+ tgt_path, 'r', encoding='utf8') as tgt_f:
for src_line, tgt_line in zip(src_f.readlines(), tgt_f.readlines()):
src_line = src_line.strip()
if not src_line:
@@ -46,7 +48,8 @@ def read(src_path, tgt_path, is_predict=False):
tgt_line = tgt_line.strip()
if not tgt_line:
continue
- yield {'src':src_line, 'tgt':tgt_line}
+ yield {'src': src_line, 'tgt': tgt_line}
+
def prepare_train_input(insts, bos_idx, eos_idx, pad_idx):
"""
@@ -65,10 +68,18 @@ def prepare_train_input(insts, bos_idx, eos_idx, pad_idx):
# 创建训练集、验证集的dataloader
def create_data_loader(args):
- train_dataset = load_dataset(read, src_path=args.training_file.split(',')[0], tgt_path=args.training_file.split(',')[1], lazy=False)
- dev_dataset = load_dataset(read, src_path=args.training_file.split(',')[0], tgt_path=args.training_file.split(',')[1], lazy=False)
+ train_dataset = load_dataset(
+ read,
+ src_path=args.training_file.split(',')[0],
+ tgt_path=args.training_file.split(',')[1],
+ lazy=False)
+ dev_dataset = load_dataset(
+ read,
+ src_path=args.training_file.split(',')[0],
+ tgt_path=args.training_file.split(',')[1],
+ lazy=False)
print('load src vocab')
- print( args.src_vocab_fpath)
+ print(args.src_vocab_fpath)
src_vocab = Vocab.load_vocabulary(
args.src_vocab_fpath,
bos_token=args.special_token[0],
@@ -88,6 +99,7 @@ def create_data_loader(args):
args.src_vocab_size = padding_vocab(len(src_vocab))
args.trg_vocab_size = padding_vocab(len(trg_vocab))
print('convert example')
+
def convert_samples(sample):
source = sample['src'].split()
target = sample['tgt'].split()
@@ -145,7 +157,8 @@ def convert_samples(sample):
data_loaders[i] = (data_loader)
return data_loaders
+
class SortType(object):
GLOBAL = 'global'
POOL = 'pool'
- NONE = "none"
\ No newline at end of file
+ NONE = "none"
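The hunk above shows only the head of `min_max_filer`; a sketch of the complete filter, consistent with its name and the lines shown (the `+ 1` accounts for one special token added to each side):

```python
def min_max_filer(data, max_len, min_len=0):
    # data is a (source_ids, target_ids) pair; keep it only when both
    # lengths (plus one special token) fall inside [min_len, max_len].
    data_min_len = min(len(data[0]), len(data[1])) + 1
    data_max_len = max(len(data[0]), len(data[1])) + 1
    return (data_min_len >= min_len) and (data_max_len <= max_len)

print(min_max_filer(([1, 2, 3], [4, 5]), max_len=8))   # True
print(min_max_filer(([1] * 20, [2] * 3), max_len=8))   # False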
diff --git a/transformer_courses/Transformer_Machine_Translation/predict.py b/transformer_courses/Transformer_Machine_Translation/predict.py
old mode 100644
new mode 100755
index a5fb0da4f..985923620
--- a/transformer_courses/Transformer_Machine_Translation/predict.py
+++ b/transformer_courses/Transformer_Machine_Translation/predict.py
@@ -1,4 +1,3 @@
-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -41,21 +40,30 @@ def read(src_path, tgt_path, is_predict=False):
src_line = src_line.strip()
if not src_line:
continue
- yield {'src':src_line, 'tgt':''}
+ yield {'src': src_line, 'tgt': ''}
else:
- with open(src_path, 'r', encoding='utf8') as src_f, open(tgt_path, 'r', encoding='utf8') as tgt_f:
- for src_line, tgt_line in zip(src_f.readlines(), tgt_f.readlines()):
+ with open(
+ src_path, 'r', encoding='utf8') as src_f, open(
+ tgt_path, 'r', encoding='utf8') as tgt_f:
+ for src_line, tgt_line in zip(src_f.readlines(),
+ tgt_f.readlines()):
src_line = src_line.strip()
if not src_line:
continue
tgt_line = tgt_line.strip()
if not tgt_line:
continue
- yield {'src':src_line, 'tgt':tgt_line}
+ yield {'src': src_line, 'tgt': tgt_line}
+
# 创建测试集的dataloader
def create_infer_loader(args):
- dataset = load_dataset(read, src_path=args.predict_file, tgt_path=None, is_predict=True, lazy=False)
+ dataset = load_dataset(
+ read,
+ src_path=args.predict_file,
+ tgt_path=None,
+ is_predict=True,
+ lazy=False)
src_vocab = Vocab.load_vocabulary(
args.src_vocab_fpath,
@@ -110,7 +118,9 @@ def prepare_infer_input(insts, bos_idx, eos_idx, pad_idx):
return [src_word, ]
-def post_process_seq(seq, bos_idx, eos_idx, output_bos=False, output_eos=False):
+
+def post_process_seq(seq, bos_idx, eos_idx, output_bos=False,
+ output_eos=False):
"""
Post-process the decoded sequence.
"""
@@ -156,7 +166,7 @@ def do_predict(args):
# Load the trained model
# assert args.init_from_params, (
# "Please set init_from_params to load the infer model.")
- init_from_params='trained_models/step_final'
+ init_from_params = 'trained_models/step_final'
model_dict = paddle.load(
os.path.join(init_from_params, "transformer.pdparams"))
@@ -180,12 +190,14 @@ def do_predict(args):
for beam_idx, beam in enumerate(ins):
if beam_idx >= args.n_best:
break
- id_list = post_process_seq(beam, args.bos_idx, args.eos_idx)
+ id_list = post_process_seq(beam, args.bos_idx,
+ args.eos_idx)
word_list = to_tokens(id_list)
sequence = " ".join(word_list) + "\n"
f.write(sequence)
f.close()
+
if __name__ == '__main__':
# 读入参数
yaml_file = './transformer.base.yaml'
@@ -193,4 +205,4 @@ def do_predict(args):
args = AttrDict(yaml.safe_load(f))
pprint(args)
- do_predict(args)
\ No newline at end of file
+ do_predict(args)
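predict.py declares `post_process_seq`, but the hunk shows only its reformatted signature. A sketch consistent with the docstring and the call site (truncate at the first end token, then optionally keep the boundary tokens):

```python
def post_process_seq(seq, bos_idx, eos_idx, output_bos=False,
                     output_eos=False):
    # Cut the decoded ids at the first eos, then drop bos/eos
    # unless the caller asks to keep them.
    eos_pos = len(seq) - 1
    for i, idx in enumerate(seq):
        if idx == eos_idx:
            eos_pos = i
            break
    return [
        idx for idx in seq[:eos_pos + 1]
        if (output_bos or idx != bos_idx) and (output_eos or idx != eos_idx)
    ]

print(post_process_seq([0, 5, 7, 1, 9], bos_idx=0, eos_idx=1))  # [5, 7]
```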
diff --git a/transformer_courses/Transformer_Machine_Translation/requirements.txt b/transformer_courses/Transformer_Machine_Translation/requirements.txt
old mode 100644
new mode 100755
diff --git a/transformer_courses/Transformer_Machine_Translation/subword.sh b/transformer_courses/Transformer_Machine_Translation/subword.sh
old mode 100644
new mode 100755
diff --git a/transformer_courses/Transformer_Machine_Translation/train.py b/transformer_courses/Transformer_Machine_Translation/train.py
old mode 100644
new mode 100755
index ca0b30d71..4554efafd
--- a/transformer_courses/Transformer_Machine_Translation/train.py
+++ b/transformer_courses/Transformer_Machine_Translation/train.py
@@ -1,4 +1,3 @@
-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -28,7 +27,7 @@
import paddle.distributed as dist
-def do_train(args,train_loader,eval_loader):
+def do_train(args, train_loader, eval_loader):
if args.use_gpu:
rank = dist.get_rank()
trainer_count = dist.get_world_size()
@@ -45,8 +44,6 @@ def do_train(args,train_loader,eval_loader):
if random_seed is not None:
paddle.seed(random_seed)
-
-
# Define model
transformer = TransformerModel(
src_vocab_size=args.src_vocab_size,
@@ -198,6 +195,7 @@ def do_train(args,train_loader,eval_loader):
paddle.save(optimizer.state_dict(),
os.path.join(model_dir, "transformer.pdopt"))
+
if __name__ == '__main__':
# 读入参数
yaml_file = './transformer.base.yaml'
@@ -209,4 +207,4 @@ def do_train(args,train_loader,eval_loader):
(train_loader), (eval_loader) = create_data_loader(args)
print('training the model')
- do_train(args,train_loader,eval_loader)
\ No newline at end of file
+ do_train(args, train_loader, eval_loader)
diff --git a/transformer_courses/Transformer_Machine_Translation/transformer.base.yaml b/transformer_courses/Transformer_Machine_Translation/transformer.base.yaml
old mode 100644
new mode 100755
diff --git a/transformer_courses/Transformer_Punctuation_Restoration/README.md b/transformer_courses/Transformer_Punctuation_Restoration/README.md
old mode 100644
new mode 100755
index 9f0806ab5..49d22b88c
--- a/transformer_courses/Transformer_Punctuation_Restoration/README.md
+++ b/transformer_courses/Transformer_Punctuation_Restoration/README.md
@@ -3,19 +3,19 @@
## 依赖模块
- python3
-- paddlenlp==2.0.0rc22
+- paddlenlp==2.0.0rc22
- paddlepaddle==2.1.1
- pandas
- attrdict==2.0.1
- ujson
- tqdm
-- paddlepaddle-gpu
+- paddlepaddle-gpu
## 项目介绍
```
|-data_transfer.py: 将测试集和训练集数据从xml格式提取成txt形式
-|-data_process.py: 数据集预处理,并且分别构建训练,验证以及测试数据集
+|-data_process.py: 数据集预处理,并且分别构建训练,验证以及测试数据集
|-dataloader.py: 包含构建dataloader的方法
|-train.py: 构建dataloader,加载预训练模型,设置AdamW优化器,cross entropy损失函数以及评估方式,并且开始ELECTRA的训练,并且在验证集上评估
|-predict.py: 启动模型预测的脚本,并且储存预测结果于txt文件
@@ -55,7 +55,7 @@ ELECTRA 是由 Kevin Clark 等人(Stanford 和 Google 团队)在 ICLR 2020
- 请按照如下格式组织数据集
```
- data
+ data
|_ IWSLT12.TED.MT.tst2011.en-fr.en.xml
|_ IWSLT12.TED.SLT.tst2011.en-fr.en.system0.comma.xml
|_ IWSLT12.TALK.dev2010.en-fr.en.xml
@@ -68,7 +68,7 @@ ELECTRA 是由 Kevin Clark 等人(Stanford 和 Google 团队)在 ICLR 2020
```bash
python data_transfer.py
python data_process.py
- ```
+ ```
## 模型训练与评估
@@ -89,4 +89,4 @@ ELECTRA 是由 Kevin Clark 等人(Stanford 和 Google 团队)在 ICLR 2020
```bash
python predict.py
- ```
\ No newline at end of file
+ ```
diff --git a/transformer_courses/Transformer_Punctuation_Restoration/README_en.md b/transformer_courses/Transformer_Punctuation_Restoration/README_en.md
old mode 100644
new mode 100755
index 7081e269c..31d926aa8
--- a/transformer_courses/Transformer_Punctuation_Restoration/README_en.md
+++ b/transformer_courses/Transformer_Punctuation_Restoration/README_en.md
@@ -3,13 +3,13 @@
## Dependent packages
- python3
-- paddlenlp==2.0.0rc22
+- paddlenlp==2.0.0rc22
- paddlepaddle==2.1.1
- pandas
- attrdict==2.0.1
- ujson
- tqdm
-- paddlepaddle-gpu
+- paddlepaddle-gpu
## Project Introduction
@@ -47,12 +47,12 @@ This experiment uses Discriminator to do the punctuation restoration task. Punct
- Download [IWSLT12.zip data set](https://aistudio.baidu.com/aistudio/datasetdetail/98318) and unzip it to the `data` directory
- ```
+ ```
mkdir data && cd data
unzip IWSLT12.zip
cd ../
```
-- Please organize the data set in the following format
+- Please organize the data set in the following format
```
data
@@ -87,4 +87,4 @@ This experiment uses Discriminator to do the punctuation restoration task. Punct
```bash
python predict.py
- ```
\ No newline at end of file
+ ```
diff --git a/transformer_courses/Transformer_Punctuation_Restoration/data_process.py b/transformer_courses/Transformer_Punctuation_Restoration/data_process.py
old mode 100644
new mode 100755
index c940023e7..18a3ecbe3
--- a/transformer_courses/Transformer_Punctuation_Restoration/data_process.py
+++ b/transformer_courses/Transformer_Punctuation_Restoration/data_process.py
@@ -11,25 +11,25 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
'''
预处理数据,并且构建数据集
'''
-import yaml
-import argparse
+import yaml
+import argparse
from pprint import pprint
from attrdict import AttrDict
-from tqdm import tqdm
+from tqdm import tqdm
import ujson
import codecs
import os
import re
-import pandas as pd
+import pandas as pd
from paddlenlp.transformers import ElectraForTokenClassification, ElectraTokenizer
from paddlenlp.data import Stack, Tuple, Pad, Dict
+
def clean_text(text):
'''
文本处理:将符号替换为’‘,’.‘,','以及‘?’之一
@@ -37,14 +37,14 @@ def clean_text(text):
text = text.replace('!', '.')
text = text.replace(':', ',')
text = text.replace('--', ',')
-
+
reg = "(?<=[a-zA-Z])-(?=[a-zA-Z]{2,})"
r = re.compile(reg, re.DOTALL)
text = r.sub(' ', text)
-
+
text = re.sub(r'\s-\s', ' , ', text)
-
-# text = text.replace('-', ',')
+
+ # text = text.replace('-', ',')
text = text.replace(';', '.')
text = text.replace(' ,', ',')
text = text.replace('♫', '')
@@ -54,19 +54,20 @@ def clean_text(text):
text = re.sub(r'--\s?--', '', text)
text = re.sub(r'\s+', ' ', text)
-
+
text = re.sub(r',\s?,', ',', text)
text = re.sub(r',\s?\.', '.', text)
text = re.sub(r'\?\s?\.', '?', text)
text = re.sub(r'\s+', ' ', text)
-
+
text = re.sub(r'\s+\?', '?', text)
text = re.sub(r'\s+,', ',', text)
text = re.sub(r'\.[\s+\.]+', '. ', text)
text = re.sub(r'\s+\.', '.', text)
-
+
return text.strip().lower()
+
def format_data(train_text):
'''
依据文本中出现的符号,分别生成文本tokens以及对应标签
@@ -74,101 +75,110 @@ def format_data(train_text):
texts:文本tokens列表,每一个item是一个文本样本对应的tokens列表
labels:标点符号标签列表,每一个item是一个标点符号标签列表,代表token的下一个位置的标点符号
'''
- labels=[]
- texts=[]
+ labels = []
+ texts = []
for line in tqdm(train_text):
- line=line.strip()
- if(len(line)==2):
+ line = line.strip()
+ if (len(line) == 2):
print(line)
continue
- text=tokenizer.tokenize(line)
- label=[]
- cur_text=[]
- flag=True
+ text = tokenizer.tokenize(line)
+ label = []
+ cur_text = []
+ flag = True
for item in text:
- if(item in punctuation_enc):
+ if (item in punctuation_enc):
# print(item)
- if(len(label)>0):
+ if (len(label) > 0):
label.pop()
label.append(punctuation_enc[item])
else:
print(text)
- falg=False
+                flag = False
break
else:
cur_text.append(item)
label.append(punctuation_enc['O'])
- if(flag):
+ if (flag):
labels.append(label)
texts.append(cur_text)
- return texts,labels
+ return texts, labels
+
# def write_json(filename, dataset):
# print('write to'+filename)
# with codecs.open(filename, mode="w", encoding="utf-8") as f:
# ujson.dump(dataset, f)
-def output_to_tsv(texts,labels,file_name):
- data=[]
- for text,label in zip(texts,labels):
- if(len(text)!=len(label)):
+
+def output_to_tsv(texts, labels, file_name):
+ data = []
+ for text, label in zip(texts, labels):
+ if (len(text) != len(label)):
print(text)
print(label)
continue
- data.append([' '.join(text),' '.join(label)])
- df=pd.DataFrame(data,columns=['text_a','label'])
- df.to_csv(file_name,index=False,sep='\t')
-
-def output_to_train_tsv(texts,labels,file_name):
- data=[]
- for text,label in zip(texts,labels):
- if(len(text)!=len(label)):
+ data.append([' '.join(text), ' '.join(label)])
+ df = pd.DataFrame(data, columns=['text_a', 'label'])
+ df.to_csv(file_name, index=False, sep='\t')
+
+
+def output_to_train_tsv(texts, labels, file_name):
+ data = []
+ for text, label in zip(texts, labels):
+ if (len(text) != len(label)):
print(text)
print(label)
continue
- if(len(text)==0):
+ if (len(text) == 0):
continue
- data.append([' '.join(text),' '.join(label)])
+ data.append([' '.join(text), ' '.join(label)])
# data=data[65000:70000]
- df=pd.DataFrame(data,columns=['text_a','label'])
- df.to_csv(file_name,index=False,sep='\t')
+ df = pd.DataFrame(data, columns=['text_a', 'label'])
+ df.to_csv(file_name, index=False, sep='\t')
+
-if __name__ == '__main__':
+if __name__ == '__main__':
# 读入参数
yaml_file = './electra.base.yaml'
with open(yaml_file, 'rt') as f:
args = AttrDict(yaml.safe_load(f))
# 数据读取
- with open(args.data_path + args.output_train_path, 'r', encoding='utf-8') as f:
+ with open(
+ args.data_path + args.output_train_path, 'r',
+ encoding='utf-8') as f:
train_text = f.readlines()
- with open(args.data_path + args.output_dev_path, 'r', encoding='utf-8') as f:
+ with open(
+ args.data_path + args.output_dev_path, 'r', encoding='utf-8') as f:
valid_text = f.readlines()
- with open(args.data_path + args.output_test_path, 'r', encoding='utf-8') as f:
+ with open(
+ args.data_path + args.output_test_path, 'r',
+ encoding='utf-8') as f:
test_text = f.readlines()
-
+
datasets = train_text, valid_text, test_text
-
+
datasets = [[clean_text(text) for text in ds] for ds in datasets]
# 利用electra的分词工具进行分词,然后构造数据集
- model_name_or_path=args.model_name_or_path
+ model_name_or_path = args.model_name_or_path
tokenizer = ElectraTokenizer.from_pretrained(model_name_or_path)
punctuation_enc = {
- 'O': '0',
- ',': '1',
- '.': '2',
- '?': '3',
- }
-
+ 'O': '0',
+ ',': '1',
+ '.': '2',
+ '?': '3',
+ }
+
# # 以一个文本序列为例,构建模型需要的数据集
# example_sentence="all the projections [ say that ] this one [ billion ] will [ only ] grow with one to two or three percent"
-
+
# print('Use the example sentence to create the dataset', example_sentence)
-
+
# example_text=tokenizer.tokenize(example_sentence)
# print(example_text)
@@ -187,9 +197,9 @@ def output_to_train_tsv(texts,labels,file_name):
# print(cur_text)
# print(len(label))
# print(len(cur_text))
-
+
# 构建训练集
- train_texts,train_labels=format_data(train_text)
+ train_texts, train_labels = format_data(train_text)
# print(len(train_texts))
# print(train_texts[0])
@@ -199,7 +209,7 @@ def output_to_train_tsv(texts,labels,file_name):
output_to_train_tsv(train_texts, train_labels, args.output_train_tsv)
# 构建测试集,导出测试集到指定路径
- test_texts,test_labels=format_data(test_text)
+ test_texts, test_labels = format_data(test_text)
output_to_tsv(test_texts, test_labels, args.output_test_tsv)
# print(len(test_texts))
@@ -207,7 +217,7 @@ def output_to_train_tsv(texts,labels,file_name):
# print(labels[0])
# 构建验证集,导出验证集到指定路径
- valid_texts, valid_labels=format_data(valid_text)
+ valid_texts, valid_labels = format_data(valid_text)
output_to_tsv(valid_texts, valid_labels, args.output_dev_tsv)
# 测试
@@ -218,6 +228,6 @@ def output_to_train_tsv(texts,labels,file_name):
# raw_path='.'
# train_file = os.path.join(raw_path, args.output_train_tsv)
# dev_file = os.path.join(raw_path, args.output_dev_tsv)
-
+
# train_data=pd.read_csv(train_file,sep='\t')
# train_data.head()
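`format_data` in this file attaches each punctuation mark to the label of the token that precedes it. A stripped-down, self-contained version of that labeling rule, shown on a toy token list:

```python
punctuation_enc = {'O': '0', ',': '1', '.': '2', '?': '3'}

def label_tokens(tokens):
    # Words get label 'O'; a punctuation token overwrites the label of
    # the previous word, mirroring format_data above.
    texts, labels = [], []
    for tok in tokens:
        if tok in (',', '.', '?'):
            if labels:
                labels[-1] = punctuation_enc[tok]
        else:
            texts.append(tok)
            labels.append(punctuation_enc['O'])
    return texts, labels

print(label_tokens(['hello', ',', 'world', '.']))
# (['hello', 'world'], ['1', '2'])
```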
diff --git a/transformer_courses/Transformer_Punctuation_Restoration/data_transfer.py b/transformer_courses/Transformer_Punctuation_Restoration/data_transfer.py
old mode 100644
new mode 100755
index 801f6d9c0..731b99de4
--- a/transformer_courses/Transformer_Punctuation_Restoration/data_transfer.py
+++ b/transformer_courses/Transformer_Punctuation_Restoration/data_transfer.py
@@ -14,18 +14,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import yaml
-import argparse
+import yaml
+import argparse
from pprint import pprint
from attrdict import AttrDict
-import xml.etree.ElementTree as ET
+import xml.etree.ElementTree as ET
from collections import Counter
import re
# 将测试集和训练集数据从xml格式提取成txt形式
-if __name__ == '__main__':
+if __name__ == '__main__':
# 读入参数
yaml_file = './electra.base.yaml'
with open(yaml_file, 'rt') as f:
@@ -37,21 +37,21 @@
file_path = data_path + args.dev_path
xmlp = ET.XMLParser(encoding="utf-8")
tree = ET.parse(file_path, parser=xmlp)
- root = tree.getroot()
+ root = tree.getroot()
docs = []
for doc_id in range(len(root[0])):
doc_segs = []
doc = root[0][doc_id]
- for seg in doc.iter('seg'):
+ for seg in doc.iter('seg'):
doc_segs.append(seg.text)
docs.extend(doc_segs)
-
+
dev_texts = [re.sub(r'\s+', ' ', ''.join(d)).strip() for d in docs]
with open(data_path + args.output_dev_path, 'w', encoding='utf-8') as f:
for text in dev_texts:
f.write(text + '\n')
-
+
file_path = data_path + args.test_path
xmlp = ET.XMLParser(encoding="utf-8")
@@ -72,24 +72,23 @@
with open(data_path + args.output_test_path, 'w', encoding='utf-8') as f:
for text in test_texts_2012:
f.write(text + '\n')
-
+
file_path = data_path + args.train_path
with open(file_path) as f:
xml = f.read()
-    tree = ET.fromstring("<root>"+ xml + "</root>")
-
+    tree = ET.fromstring("<root>" + xml + "</root>")
+
docs = []
for doc in tree.iter('transcript'):
- text_arr=doc.text.split('\n')
- text_arr=[item.strip() for item in text_arr if(len(item.strip())>2)]
+ text_arr = doc.text.split('\n')
+ text_arr = [
+ item.strip() for item in text_arr if (len(item.strip()) > 2)
+ ]
# print(text_arr)
docs.extend(text_arr)
# break
- train_texts=docs
+ train_texts = docs
with open(data_path + args.output_train_path, 'w', encoding='utf-8') as f:
for text in train_texts:
f.write(text + '\n')
-
-
-
diff --git a/transformer_courses/Transformer_Punctuation_Restoration/dataloader.py b/transformer_courses/Transformer_Punctuation_Restoration/dataloader.py
old mode 100644
new mode 100755
index ef00ab069..1c95fe939
--- a/transformer_courses/Transformer_Punctuation_Restoration/dataloader.py
+++ b/transformer_courses/Transformer_Punctuation_Restoration/dataloader.py
@@ -1,4 +1,3 @@
-
# -*- coding: UTF-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
@@ -15,13 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import yaml
-import argparse
+import yaml
+import argparse
from pprint import pprint
from attrdict import AttrDict
import paddle
-from paddle.io import DataLoader
+from paddle.io import DataLoader
import paddle
from paddlenlp.transformers import LinearDecayWithWarmup
@@ -42,29 +41,25 @@ class TEDTalk(DatasetBuilder):
构建针对TEDTalk数据集的dataset的类
'''
- SPLITS = {
- 'train': 'train.tsv',
- 'dev':'dev.tsv',
- 'test': 'test.tsv'
- }
+ SPLITS = {'train': 'train.tsv', 'dev': 'dev.tsv', 'test': 'test.tsv'}
def _get_data(self, mode, **kwargs):
- default_root='.'
- self.mode=mode
+ default_root = '.'
+ self.mode = mode
filename = self.SPLITS[mode]
fullname = os.path.join(default_root, filename)
return fullname
def _read(self, filename, *args):
- df=pd.read_csv(filename,sep='\t')
- for idx,row in df.iterrows():
- text=row['text_a']
- if(type(text)==float):
+ df = pd.read_csv(filename, sep='\t')
+ for idx, row in df.iterrows():
+ text = row['text_a']
+ if (type(text) == float):
print(text)
continue
- tokens=row['text_a'].split()
- tags=row['label'].split()
+ tokens = row['text_a'].split()
+ tags = row['label'].split()
# if(self.mode=='train'):
# tags=row['label'].split()
# else:
@@ -74,18 +69,18 @@ def _read(self, filename, *args):
def get_labels(self):
return ["0", "1", "2", "3"]
-
+
+
def load_dataset(path_or_read_func,
name=None,
data_files=None,
splits=None,
lazy=None,
**kwargs):
-
'''
根据需要的数据集类型,加载相应TEDTalk dataset
'''
-
+
reader_cls = TEDTalk
print(reader_cls)
if not name:
@@ -93,10 +88,15 @@ def load_dataset(path_or_read_func,
else:
reader_instance = reader_cls(lazy=lazy, name=name, **kwargs)
- datasets = reader_instance.read_datasets(data_files=data_files, splits=splits)
+ datasets = reader_instance.read_datasets(
+ data_files=data_files, splits=splits)
return datasets
-
-def tokenize_and_align_labels(example, tokenizer, no_entity_id, max_seq_len=512):
+
+
+def tokenize_and_align_labels(example,
+ tokenizer,
+ no_entity_id,
+ max_seq_len=512):
labels = example['labels']
example = example['tokens']
# print(labels)
@@ -115,6 +115,7 @@ def tokenize_and_align_labels(example, tokenizer, no_entity_id, max_seq_len=512)
# print(tokenized_input)
return tokenized_input
+
def create_train_dataloader(args):
'''
构建用于训练的dataloader
@@ -126,54 +127,55 @@ def create_train_dataloader(args):
train_data_loader:训练数据data loader
valid_data_loader:验证数据data loader
'''
-
+
# 加载dataset
- train_ds, valid_ds = load_dataset('TEDTalk', splits=('train', 'dev'), lazy=False)
+ train_ds, valid_ds = load_dataset(
+ 'TEDTalk', splits=('train', 'dev'), lazy=False)
label_list = train_ds.label_list
label_num = len(label_list)
# no_entity_id = label_num - 1
- no_entity_id=0
-
+ no_entity_id = 0
+
print(label_list)
# 构建dataloader
- model_name_or_path=args.model_name_or_path
+ model_name_or_path = args.model_name_or_path
tokenizer = ElectraTokenizer.from_pretrained(model_name_or_path)
trans_func = partial(
- tokenize_and_align_labels,
- tokenizer=tokenizer,
- no_entity_id=no_entity_id,
- max_seq_len=args.max_seq_length)
+ tokenize_and_align_labels,
+ tokenizer=tokenizer,
+ no_entity_id=no_entity_id,
+ max_seq_len=args.max_seq_length)
train_ds = train_ds.map(trans_func)
-
+
batchify_fn = lambda samples, fn=Dict({
'input_ids': Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype='int32'), # input
'token_type_ids': Pad(axis=0, pad_val=tokenizer.pad_token_type_id, dtype='int32'), # segment
'seq_len': Stack(dtype='int64'), # seq_len
'labels': Pad(axis=0, pad_val=args.ignore_label, dtype='int64') # label
}): fn(samples)
-
+
train_batch_sampler = paddle.io.DistributedBatchSampler(
- train_ds, batch_size=args.batch_size, shuffle=True, drop_last=True)
-
+ train_ds, batch_size=args.batch_size, shuffle=True, drop_last=True)
+
train_data_loader = DataLoader(
- dataset=train_ds,
- collate_fn=batchify_fn,
- num_workers=0,
- batch_sampler=train_batch_sampler,
- return_list=True)
+ dataset=train_ds,
+ collate_fn=batchify_fn,
+ num_workers=0,
+ batch_sampler=train_batch_sampler,
+ return_list=True)
valid_ds = valid_ds.map(trans_func)
valid_data_loader = DataLoader(
- dataset=valid_ds,
- collate_fn=batchify_fn,
- num_workers=0,
- batch_size=args.batch_size,
- return_list=True)
-
+ dataset=valid_ds,
+ collate_fn=batchify_fn,
+ num_workers=0,
+ batch_size=args.batch_size,
+ return_list=True)
+
# 测试
# for index,data in enumerate(train_data_loader):
# # print(len(data))
@@ -181,8 +183,9 @@ def create_train_dataloader(args):
# print(data)
# break
- return train_data_loader, valid_data_loader
-
+ return train_data_loader, valid_data_loader
+
+
def create_test_dataloader(args):
'''
构建测试用的dataloader
@@ -193,21 +196,21 @@ def create_test_dataloader(args):
return:
test_data_loader
'''
- no_entity_id=0
-
+ no_entity_id = 0
+
# 加载dataset
test_ds = load_dataset('TEDTalk', splits=('test'), lazy=False)
# 构建dataloader
- model_name_or_path=args.model_name_or_path
+ model_name_or_path = args.model_name_or_path
tokenizer = ElectraTokenizer.from_pretrained(model_name_or_path)
trans_func = partial(
- tokenize_and_align_labels,
- tokenizer=tokenizer,
- no_entity_id=no_entity_id,
- max_seq_len=args.max_seq_length)
-
+ tokenize_and_align_labels,
+ tokenizer=tokenizer,
+ no_entity_id=no_entity_id,
+ max_seq_len=args.max_seq_length)
+
batchify_fn = lambda samples, fn=Dict({
'input_ids': Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype='int32'), # input
'token_type_ids': Pad(axis=0, pad_val=tokenizer.pad_token_type_id, dtype='int32'), # segment
@@ -218,10 +221,10 @@ def create_test_dataloader(args):
test_ds = test_ds.map(trans_func)
test_data_loader = DataLoader(
- dataset=test_ds,
- collate_fn=batchify_fn,
- num_workers=0,
- batch_size=args.batch_size,
- return_list=True)
+ dataset=test_ds,
+ collate_fn=batchify_fn,
+ num_workers=0,
+ batch_size=args.batch_size,
+ return_list=True)
- return test_data_loader
\ No newline at end of file
+ return test_data_loader
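The `batchify_fn` above relies on paddlenlp's `Pad` to right-pad variable-length sequences into one array. A minimal numpy stand-in showing what that padding step produces (hypothetical token ids):

```python
import numpy as np

def pad_batch(seqs, pad_val):
    # Right-pad every sequence to the batch maximum, like Pad(axis=0, ...).
    max_len = max(len(s) for s in seqs)
    return np.array([s + [pad_val] * (max_len - len(s)) for s in seqs])

batch = [[101, 7, 8, 102], [101, 9, 102]]
print(pad_batch(batch, pad_val=0))
# [[101   7   8 102]
#  [101   9 102   0]]
```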
diff --git a/transformer_courses/Transformer_Punctuation_Restoration/electra.base.yaml b/transformer_courses/Transformer_Punctuation_Restoration/electra.base.yaml
old mode 100644
new mode 100755
diff --git a/transformer_courses/Transformer_Punctuation_Restoration/predict.py b/transformer_courses/Transformer_Punctuation_Restoration/predict.py
old mode 100644
new mode 100755
index 30fafa49b..2af41f5d4
--- a/transformer_courses/Transformer_Punctuation_Restoration/predict.py
+++ b/transformer_courses/Transformer_Punctuation_Restoration/predict.py
@@ -1,4 +1,3 @@
-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import yaml
-import argparse
+import yaml
+import argparse
from pprint import pprint
from attrdict import AttrDict
@@ -22,9 +21,10 @@
import paddle
from paddlenlp.transformers import ElectraForTokenClassification, ElectraTokenizer
-from dataloader import create_test_dataloader,load_dataset
+from dataloader import create_test_dataloader, load_dataset
from utils import evaluate, write2txt
+
def parse_decodes(input_words, id2label, decodes, lens):
decodes = [x for batch in decodes for x in batch]
lens = [x for batch in lens for x in batch]
@@ -36,16 +36,17 @@ def parse_decodes(input_words, id2label, decodes, lens):
sent_out = []
tags_out = []
for s, t in zip(sent, tags):
- if(t=='0'):
+ if (t == '0'):
sent_out.append(s)
else:
# sent_out.append(s)
- sent_out.append(s+punctuation_dec[t])
- sent=' '.join(sent_out)
- sent=sent.replace(' ##','')
+ sent_out.append(s + punctuation_dec[t])
+ sent = ' '.join(sent_out)
+ sent = sent.replace(' ##', '')
outputs.append(sent)
return outputs
+
def do_predict(test_data_loader):
for step, batch in enumerate(test_data_loader):
input_ids, token_type_ids, length, labels = batch
@@ -56,6 +57,7 @@ def do_predict(test_data_loader):
preds = parse_decodes(raw_data, id2label, pred_list, len_list)
return preds
+
if __name__ == '__main__':
# 读入参数
yaml_file = './electra.base.yaml'
@@ -65,7 +67,7 @@ def do_predict(test_data_loader):
# 加载模型参数
best_model = args.best_model
- init_checkpoint_path=os.path.join(args.output_dir, best_model)
+ init_checkpoint_path = os.path.join(args.output_dir, best_model)
model_dict = paddle.load(init_checkpoint_path)
# 加载dataset
@@ -75,26 +77,27 @@ def do_predict(test_data_loader):
label_num = len(label_list)
# 加载模型与模型参数
- model = ElectraForTokenClassification.from_pretrained(args.model_name_or_path, num_classes=label_num)
- model.set_dict(model_dict)
-
+ model = ElectraForTokenClassification.from_pretrained(
+ args.model_name_or_path, num_classes=label_num)
+ model.set_dict(model_dict)
+
# 构建符号解码字典
punctuation_dec = {
- '0': 'O',
- '1': ',',
- '2': '.',
- '3': '?',
- }
-
+ '0': 'O',
+ '1': ',',
+ '2': '.',
+ '3': '?',
+ }
+
id2label = dict(enumerate(label_list))
raw_data = test_ds.data
-
+
model.eval()
pred_list = []
len_list = []
# 加载测试集data loader
- test_data_loader = create_test_dataloader(args)
+ test_data_loader = create_test_dataloader(args)
# 设置损失函数 - Cross Entropy
loss_fct = paddle.nn.loss.CrossEntropyLoss(ignore_index=args.ignore_label)
@@ -107,4 +110,4 @@ def do_predict(test_data_loader):
# 将预测结果解码成真实句子,写入到txt文件
if args.isSavingPreds == 1:
- write2txt(args, preds)
+ write2txt(args, preds)
diff --git a/transformer_courses/Transformer_Punctuation_Restoration/requirements.txt b/transformer_courses/Transformer_Punctuation_Restoration/requirements.txt
old mode 100644
new mode 100755
diff --git a/transformer_courses/Transformer_Punctuation_Restoration/train.py b/transformer_courses/Transformer_Punctuation_Restoration/train.py
old mode 100644
new mode 100755
index fd10cb90e..4acef1992
--- a/transformer_courses/Transformer_Punctuation_Restoration/train.py
+++ b/transformer_courses/Transformer_Punctuation_Restoration/train.py
@@ -1,4 +1,3 @@
-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,8 @@
import os
import time
-import yaml
-import argparse
+import yaml
+import argparse
from pprint import pprint
from attrdict import AttrDict
@@ -33,22 +32,23 @@
from utils import compute_metrics, evaluate
import paddle.distributed as dist
-
-import yaml
-import argparse
+
+import yaml
+import argparse
from pprint import pprint
from attrdict import AttrDict
import paddle
-from paddle.io import DataLoader
+from paddle.io import DataLoader
import os
import pandas as pd
from sklearn.metrics import classification_report
from functools import partial
-
+
+
def do_train(args):
- last_step = args.num_train_epochs * len(train_data_loader)
+ last_step = args.num_train_epochs * len(train_data_loader)
tic_train = time.time()
for epoch in range(args.num_train_epochs):
@@ -58,12 +58,13 @@ def do_train(args):
input_ids, token_type_ids, _, labels = batch
logits = model(input_ids, token_type_ids)
loss = loss_fct(logits, labels)
- avg_loss = paddle.mean(loss)
+ avg_loss = paddle.mean(loss)
if args.global_step % args.logging_steps == 0:
- print("global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
- % (args.global_step, epoch, step, avg_loss,
- args.logging_steps / (time.time() - tic_train)))
+ print(
+ "global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
+ % (args.global_step, epoch, step, avg_loss,
+ args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
avg_loss.backward()
@@ -73,8 +74,12 @@ def do_train(args):
if args.global_step % args.save_steps == 0 or args.global_step == last_step:
if paddle.distributed.get_rank() == 0:
- evaluate(model, loss_fct, valid_data_loader, label_num)
- paddle.save(model.state_dict(),os.path.join(args.output_dir, "model_%d.pdparams" % args.global_step))
+ evaluate(model, loss_fct, valid_data_loader, label_num)
+ paddle.save(
+ model.state_dict(),
+ os.path.join(args.output_dir,
+ "model_%d.pdparams" % args.global_step))
+
# 模型训练
if __name__ == '__main__':
@@ -82,38 +87,42 @@ def do_train(args):
yaml_file = './electra.base.yaml'
with open(yaml_file, 'rt') as f:
args = AttrDict(yaml.safe_load(f))
-
- paddle.set_device(args.device) # 使用gpu,相应地,安装paddlepaddle-gpu
-
- train_data_loader, valid_data_loader = create_train_dataloader(args)
+
+ paddle.set_device(args.device) # 使用gpu,相应地,安装paddlepaddle-gpu
+
+ train_data_loader, valid_data_loader = create_train_dataloader(args)
# 加载dataset
# Create dataset, tokenizer and dataloader.
- train_ds, test_ds = load_dataset('TEDTalk', splits=('train', 'test'), lazy=False)
+ train_ds, test_ds = load_dataset(
+ 'TEDTalk', splits=('train', 'test'), lazy=False)
label_list = train_ds.label_list
label_num = len(label_list)
# 加载预训练模型
# Define the model netword and its loss
- model = ElectraForTokenClassification.from_pretrained(args.model_name_or_path, num_classes= label_num)
+ model = ElectraForTokenClassification.from_pretrained(
+ args.model_name_or_path, num_classes=label_num)
# 设置AdamW优化器
- num_training_steps = args.max_steps if args.max_steps > 0 else len(train_data_loader) * args.num_train_epochs
- lr_scheduler = LinearDecayWithWarmup(float(args.learning_rate), num_training_steps, args.warmup_steps)
+ num_training_steps = args.max_steps if args.max_steps > 0 else len(
+ train_data_loader) * args.num_train_epochs
+ lr_scheduler = LinearDecayWithWarmup(
+ float(args.learning_rate), num_training_steps, args.warmup_steps)
# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
- p.name for n, p in model.named_parameters()
- if not any(nd in n for nd in ["bias", "norm"])
- ]
+ p.name for n, p in model.named_parameters()
+ if not any(nd in n for nd in ["bias", "norm"])
+ ]
optimizer = paddle.optimizer.AdamW(
- learning_rate=lr_scheduler,
- epsilon=float(args.adam_epsilon),
- parameters=model.parameters(),
- weight_decay=args.weight_decay,
- apply_decay_param_fun=lambda x: x in decay_params)
+ learning_rate=lr_scheduler,
+ epsilon=float(args.adam_epsilon),
+ parameters=model.parameters(),
+ weight_decay=args.weight_decay,
+ apply_decay_param_fun=lambda x: x in decay_params)
# 设置损失函数 - Cross Entropy
loss_fct = paddle.nn.loss.CrossEntropyLoss(ignore_index=args.ignore_label)
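The `decay_params` list built above excludes bias and LayerNorm parameters from weight decay by substring match on parameter names. The rule in isolation, on hypothetical names standing in for `model.named_parameters()`:

```python
# Hypothetical parameter names; any name containing 'bias' or 'norm'
# is excluded from weight decay.
param_names = ['embedding.weight', 'encoder.fc.bias',
               'layer_norm_0.weight', 'classifier.weight']
decay_params = [
    n for n in param_names if not any(nd in n for nd in ['bias', 'norm'])
]
print(decay_params)  # ['embedding.weight', 'classifier.weight']
```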
diff --git a/transformer_courses/Transformer_Punctuation_Restoration/utils.py b/transformer_courses/Transformer_Punctuation_Restoration/utils.py
old mode 100644
new mode 100755
index fefb404e9..05a27e9c6
--- a/transformer_courses/Transformer_Punctuation_Restoration/utils.py
+++ b/transformer_courses/Transformer_Punctuation_Restoration/utils.py
@@ -15,32 +15,34 @@
import paddle
from sklearn.metrics import classification_report
+
def compute_metrics(labels, decodes, lens):
decodes = [x for batch in decodes for x in batch]
lens = [x for batch in lens for x in batch]
- labels=[x for batch in labels for x in batch]
+ labels = [x for batch in labels for x in batch]
outputs = []
- nb_correct=0
- nb_true=0
- val_f1s=[]
- label_vals=[0,1,2,3]
- y_trues=[]
- y_preds=[]
+ nb_correct = 0
+ nb_true = 0
+ val_f1s = []
+ label_vals = [0, 1, 2, 3]
+ y_trues = []
+ y_preds = []
for idx, end in enumerate(lens):
y_true = labels[idx][:end].tolist()
y_pred = [x for x in decodes[idx][:end]]
nb_correct += sum(y_t == y_p for y_t, y_p in zip(y_true, y_pred))
- nb_true+=len(y_true)
+ nb_true += len(y_true)
y_trues.extend(y_true)
y_preds.extend(y_pred)
score = nb_correct / nb_true
# val_f1 = metrics.f1_score(y_trues, y_preds, average='micro', labels=label_vals)
- result=classification_report(y_trues, y_preds)
+ result = classification_report(y_trues, y_preds)
# print(val_f1)
- return score,result
-
+ return score, result
+
+
def evaluate(model, loss_fct, data_loader, label_num):
'''
模型评估
@@ -48,7 +50,7 @@ def evaluate(model, loss_fct, data_loader, label_num):
model.eval()
pred_list = []
len_list = []
- labels_list=[]
+ labels_list = []
for batch in data_loader:
input_ids, token_type_ids, length, labels = batch
logits = model(input_ids, token_type_ids)
@@ -58,11 +60,12 @@ def evaluate(model, loss_fct, data_loader, label_num):
pred_list.append(pred.numpy())
len_list.append(length.numpy())
labels_list.append(labels.numpy())
- accuracy, result=compute_metrics(labels_list, pred_list, len_list)
+ accuracy, result = compute_metrics(labels_list, pred_list, len_list)
print("eval loss: %f, accuracy: %f" % (avg_loss, accuracy))
print(result)
model.train()
+
def write2txt(args, preds):
'''
将预测结果导入到txt文件
@@ -71,5 +74,7 @@ def write2txt(args, preds):
with open(file_path, "w", encoding="utf8") as fout:
fout.write("\n".join(preds))
# Print some examples
- print("The results have been saved in the file: %s, 5 examples are shown below: " % file_path)
- print("\n".join(preds[:5]))
\ No newline at end of file
+ print(
+ "The results have been saved in the file: %s, 5 examples are shown below: "
+ % file_path)
+ print("\n".join(preds[:5]))
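`compute_metrics` in utils.py counts token-level matches only up to each sequence's true length, ignoring the padded tail. The core accuracy computation, on toy arrays:

```python
import numpy as np

# Toy padded batch (hypothetical); lens holds each sequence's true length.
labels = np.array([[1, 0, 2, 0, 0], [3, 0, 0, 0, 0]])
preds = np.array([[1, 0, 1, 0, 0], [3, 0, 0, 0, 0]])
lens = [3, 1]

nb_correct = sum(
    (labels[i][:end] == preds[i][:end]).sum() for i, end in enumerate(lens))
nb_true = sum(lens)
print(nb_correct / nb_true)  # 0.75
```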
diff --git a/transformer_courses/object_detection_DETR/README.md b/transformer_courses/object_detection_DETR/README.md
old mode 100644
new mode 100755
index 788499585..238a7e9a4
--- a/transformer_courses/object_detection_DETR/README.md
+++ b/transformer_courses/object_detection_DETR/README.md
@@ -33,34 +33,34 @@ python -c "import paddle; print(paddle.__version__)"
# # 目录结构
- |-- data:数据处理
- |-- dataset.py:COCO格式数据解析
- |-- operators.py:包含各种预处理方法
- |-- models:模型
- |-- backbone.py:backbone模型
- |-- positional_encoding.py:位置编码器
- |-- transformer.py:transormer模型
- |-- hungarian_matcher.py:双边匹配方法
- |-- loss.py:计算损失函数
- |-- detr_head.py:DETR的head模型
- |-- post_process.py:DETR结果后处理
- |-- detr.py:DETR的整体结构
- |-- layers.py、ops.py:定义网络层
- |-- initializer.py:初始化方法
- |-- callbacks.py:callbacks方法,LogPrinter等
- |-- optimizer.py:优化器、学习率衰减等方法
- |-- save_model.py:保存模型
- |-- utils:
- |-- bbox_utils.py:框bbox的处理
- |-- load_model.py:加载预训练模型
- |-- util.py:定义GIoULoss等函数
- |-- imgs:README图片
- |-- test_imgs:测试图片
- |-- output:测试结果图片
- |-- main.py:主函数
- |-- train_model.py:训练代码
- |-- eval_model.py:验证代码
- |-- test_model.py:测试代码
+ |-- data:数据处理
+ |-- dataset.py:COCO格式数据解析
+ |-- operators.py:包含各种预处理方法
+ |-- models:模型
+ |-- backbone.py:backbone模型
+ |-- positional_encoding.py:位置编码器
+        |-- transformer.py:transformer模型
+ |-- hungarian_matcher.py:双边匹配方法
+ |-- loss.py:计算损失函数
+ |-- detr_head.py:DETR的head模型
+ |-- post_process.py:DETR结果后处理
+ |-- detr.py:DETR的整体结构
+ |-- layers.py、ops.py:定义网络层
+ |-- initializer.py:初始化方法
+ |-- callbacks.py:callbacks方法,LogPrinter等
+ |-- optimizer.py:优化器、学习率衰减等方法
+ |-- save_model.py:保存模型
+ |-- utils:
+ |-- bbox_utils.py:框bbox的处理
+ |-- load_model.py:加载预训练模型
+ |-- util.py:定义GIoULoss等函数
+ |-- imgs:README图片
+ |-- test_imgs:测试图片
+ |-- output:测试结果图片
+ |-- main.py:主函数
+ |-- train_model.py:训练代码
+ |-- eval_model.py:验证代码
+ |-- test_model.py:测试代码
# 准备数据集
@@ -75,16 +75,16 @@ unzip -q -o annotations_trainval2017.zip -d dataset
解压之后,完整COCO数据存储结构:
```bash
- |-- annotations:标注文件
- |-- person_keypoints_train2017.json:关键点检测
- |-- person_keypoints_val2017.json
- |-- captions_train2017.json:看图说话
- |-- captions_val2017.json
- |-- instances_train2017.json:目标实例
- |-- instances_val2017.json
- |-- images:图片
- |-- train2017
- |-- val2017
+ |-- annotations:标注文件
+ |-- person_keypoints_train2017.json:关键点检测
+ |-- person_keypoints_val2017.json
+ |-- captions_train2017.json:看图说话
+ |-- captions_val2017.json
+ |-- instances_train2017.json:目标实例
+ |-- instances_val2017.json
+ |-- images:图片
+ |-- train2017
+ |-- val2017
```
# 训练
@@ -122,4 +122,3 @@ python main.py --mode='test' --infer_img='test_imgs/000000014439.jpg' --anno_pa
**效果展示**:

-
diff --git a/transformer_courses/object_detection_DETR/bbox.json b/transformer_courses/object_detection_DETR/bbox.json
old mode 100644
new mode 100755
diff --git a/transformer_courses/object_detection_DETR/data/__init__.py b/transformer_courses/object_detection_DETR/data/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/object_detection_DETR/data/dataset.py b/transformer_courses/object_detection_DETR/data/dataset.py
old mode 100644
new mode 100755
index 82e6a78b3..98094a4a7
--- a/transformer_courses/object_detection_DETR/data/dataset.py
+++ b/transformer_courses/object_detection_DETR/data/dataset.py
@@ -5,6 +5,7 @@
from collections.abc import Sequence
from paddle.io import Dataset
+
class COCODataSet(Dataset):
"""
Load dataset with COCO format.
@@ -39,7 +40,7 @@ def __init__(self,
self.anno_path = anno_path
self.image_dir = image_dir if image_dir is not None else ''
self.data_fields = data_fields
- self.sample_num = sample_num
+ self.sample_num = sample_num
self.load_image_only = False
self.load_semantic = False
self.load_crowd = load_crowd
@@ -112,7 +113,7 @@ def parse_dataset(self):
if 'annotations' not in coco.dataset:
self.load_image_only = True
print('Annotation file: {} does not contains ground truth '
- 'and load image information only.'.format(anno_path))
+ 'and load image information only.'.format(anno_path))
for img_id in img_ids:
img_anno = coco.loadImgs([img_id])[0]
@@ -125,13 +126,13 @@ def parse_dataset(self):
is_empty = False
if not os.path.exists(im_path):
print('Illegal image file: {}, and it will be '
- 'ignored'.format(im_path))
+ 'ignored'.format(im_path))
continue
if im_w < 0 or im_h < 0:
print('Illegal width: {} or height: {} in annotation, '
- 'and im_id: {} will be ignored'.format(
- im_w, im_h, img_id))
+ 'and im_id: {} will be ignored'.format(im_w, im_h,
+ img_id))
continue
coco_rec = {
@@ -143,7 +144,8 @@ def parse_dataset(self):
if not self.load_image_only:
ins_anno_ids = coco.getAnnIds(
- imgIds=[img_id], iscrowd=None if self.load_crowd else False)
+ imgIds=[img_id],
+ iscrowd=None if self.load_crowd else False)
instances = coco.loadAnns(ins_anno_ids)
bboxes = []
@@ -265,7 +267,6 @@ def parse_dataset(self):
records += empty_records
self.roidbs = records
-
def set_kwargs(self, **kwargs):
self.mixup_epoch = kwargs.get('mixup_epoch', -1)
self.cutmix_epoch = kwargs.get('cutmix_epoch', -1)
diff --git a/transformer_courses/object_detection_DETR/data/operators.py b/transformer_courses/object_detection_DETR/data/operators.py
old mode 100644
new mode 100755
index fd533069c..b6345a4de
--- a/transformer_courses/object_detection_DETR/data/operators.py
+++ b/transformer_courses/object_detection_DETR/data/operators.py
@@ -16,12 +16,16 @@
from utils import bbox_utils
SIZE_UNIT = ['K', 'M', 'G', 'T']
+
+
def _parse_size_in_M(size_str):
num, unit = size_str[:-1], size_str[-1]
assert unit in SIZE_UNIT, \
"unknown shm size unit {}".format(unit)
return float(num) * \
(1024 ** (SIZE_UNIT.index(unit) - 1))
+
+
class Compose(object):
def __init__(self, transforms, num_classes=80):
self.transforms = transforms
@@ -42,8 +46,8 @@ def __call__(self, data):
except Exception as e:
stack_info = traceback.format_exc()
print("fail to map sample transform [{}] "
- "with error: {} and stack:\n{}".format(
- f, e, str(stack_info)))
+ "with error: {} and stack:\n{}".format(f, e,
+ str(stack_info)))
raise e
return data
@@ -60,8 +64,8 @@ def __call__(self, data):
except Exception as e:
stack_info = traceback.format_exc()
print("fail to map batch transform [{}] "
- "with error: {} and stack:\n{}".format(
- f, e, str(stack_info)))
+ "with error: {} and stack:\n{}".format(f, e,
+ str(stack_info)))
raise e
# remove keys which is not needed by model
@@ -131,7 +135,7 @@ def __init__(self,
sample_transforms, num_classes=num_classes)
# batch transfrom
self._batch_transforms = BatchCompose(batch_transforms, num_classes,
- collate_batch)
+ collate_batch)
self.batch_size = batch_size
self.shuffle = shuffle
self.drop_last = drop_last
@@ -169,7 +173,7 @@ def __call__(self,
shm_size = _get_shared_memory_size_in_M()
if shm_size is not None and shm_size < 1024.:
print("Shared memory size is less than 1G, "
- "disable shared_memory in DataLoader")
+ "disable shared_memory in DataLoader")
use_shared_memory = False
self.dataloader = DataLoader(
dataset=self.dataset,
@@ -197,7 +201,6 @@ def __next__(self):
def next(self):
# python2 compatibility
return self.__next__()
-
class BaseOperator(object):
@@ -234,6 +237,7 @@ def __call__(self, sample, context=None):
def __str__(self):
return str(self._id)
+
class Decode(BaseOperator):
def __init__(self):
""" Transform the image data to numpy format following the rgb format
@@ -276,6 +280,7 @@ def apply(self, sample, context=None):
sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return sample
+
class RandomFlip(BaseOperator):
def __init__(self, prob=0.5):
"""
@@ -305,7 +310,8 @@ def _flip_rle(rle, height, width):
for segm in segms:
if is_poly(segm):
# Polygon format
- flipped_segms.append([_flip_poly(poly, width) for poly in segm])
+ flipped_segms.append(
+ [_flip_poly(poly, width) for poly in segm])
else:
# RLE format
import pycocotools.mask as mask_util
@@ -373,13 +379,14 @@ def apply(self, sample, context=None):
sample['gt_segm'] = sample['gt_segm'][:, :, ::-1]
if 'gt_rbox2poly' in sample and sample['gt_rbox2poly'].any():
- sample['gt_rbox2poly'] = self.apply_rbox(sample['gt_rbox2poly'],
- width)
+ sample['gt_rbox2poly'] = self.apply_rbox(
+ sample['gt_rbox2poly'], width)
sample['flipped'] = True
sample['image'] = im
return sample
+
class RandomSelect(BaseOperator):
"""
Randomly choose a transformation between transforms1 and transforms2,
@@ -397,6 +404,7 @@ def apply(self, sample, context=None):
return self.transforms1(sample)
return self.transforms2(sample)
+
class RandomShortSideResize(BaseOperator):
def __init__(self,
short_side_sizes,
@@ -461,8 +469,8 @@ def resize(self,
if len(im.shape) != 3:
raise ImageError('{}: image is not 3-dimensional.'.format(self))
- target_size = self.get_size_with_aspect_ratio(im.shape[:2], target_size,
- max_size)
+ target_size = self.get_size_with_aspect_ratio(im.shape[:2],
+ target_size, max_size)
im_scale_y, im_scale_x = target_size[1] / im.shape[0], target_size[
0] / im.shape[1]
@@ -483,8 +491,8 @@ def resize(self,
sample['gt_bbox'], [im_scale_x, im_scale_y], target_size)
# apply polygon
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
- sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im.shape[:2],
- [im_scale_x, im_scale_y])
+ sample['gt_poly'] = self.apply_segm(
+ sample['gt_poly'], im.shape[:2], [im_scale_x, im_scale_y])
# apply semantic
if 'semantic' in sample and sample['semantic']:
semantic = sample['semantic']
@@ -677,14 +685,15 @@ def _crop_poly(segm, crop):
if not isinstance(part, Polygon):
continue
part = np.squeeze(
- np.array(part.exterior.coords[:-1]).reshape(1,
- -1))
+ np.array(part.exterior.coords[:-1]).reshape(
+ 1, -1))
part[0::2] -= xmin
part[1::2] -= ymin
crop_segm.append(part.tolist())
elif isinstance(inter, Polygon):
crop_poly = np.squeeze(
- np.array(inter.exterior.coords[:-1]).reshape(1, -1))
+ np.array(inter.exterior.coords[:-1]).reshape(1,
+ -1))
crop_poly[0::2] -= xmin
crop_poly[1::2] -= ymin
crop_segm.append(crop_poly.tolist())
@@ -726,8 +735,11 @@ def apply(self, sample, context=None):
region = self.get_crop_params(sample['image'].shape[:2], [h, w])
return self.crop(sample, region)
+
class NormalizeImage(BaseOperator):
- def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1],
+ def __init__(self,
+ mean=[0.485, 0.456, 0.406],
+ std=[1, 1, 1],
is_scale=True):
"""
Args:
@@ -795,6 +807,7 @@ def apply(self, sample, context):
return sample
+
class BboxXYXY2XYWH(BaseOperator):
"""
Convert bbox XYXY format to XYWH format.
@@ -825,6 +838,7 @@ def apply(self, sample, context=None):
sample['image'] = im
return sample
+
class Resize(BaseOperator):
def __init__(self, target_size, keep_ratio, interp=cv2.INTER_LINEAR):
"""
@@ -968,8 +982,8 @@ def apply(self, sample, context=None):
# apply polygon
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
- sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_shape[:2],
- [im_scale_x, im_scale_y])
+ sample['gt_poly'] = self.apply_segm(
+ sample['gt_poly'], im_shape[:2], [im_scale_x, im_scale_y])
# apply semantic
if 'semantic' in sample and sample['semantic']:
@@ -1001,6 +1015,7 @@ def apply(self, sample, context=None):
return sample
+
class PadMaskBatch(BaseOperator):
"""
Pad a batch of samples so they can be divisible by a stride.
@@ -1022,7 +1037,7 @@ def __call__(self, samples, context=None):
Args:
samples (list): a batch of sample, each is dict.
"""
-
+
coarsest_stride = self.pad_to_stride
max_shape = np.array([data['image'].shape for data in samples]).max(
@@ -1065,4 +1080,4 @@ def __call__(self, samples, context=None):
rbox = bbox_utils.poly2rbox(polys)
data['gt_rbox'] = rbox
- return samples
\ No newline at end of file
+ return samples
diff --git a/transformer_courses/object_detection_DETR/eval_model.py b/transformer_courses/object_detection_DETR/eval_model.py
old mode 100644
new mode 100755
index be1bdcd0a..5e8b0b403
--- a/transformer_courses/object_detection_DETR/eval_model.py
+++ b/transformer_courses/object_detection_DETR/eval_model.py
@@ -8,6 +8,7 @@
from data.operators import *
from models import ComposeCallback, LogPrinter
+
def get_categories(metric_type, anno_file=None, arch=None):
"""
Get class id to category id map and category id
@@ -31,6 +32,7 @@ def get_categories(metric_type, anno_file=None, arch=None):
else:
return _coco17_category()
+
def _coco17_category():
"""
Get class id to category id map and category id
@@ -209,6 +211,7 @@ def _coco17_category():
return clsid2catid, catid2name
+
def get_infer_results(outs, catid, bias=0):
"""
Get result at the stage of inference.
@@ -234,6 +237,8 @@ def get_infer_results(outs, catid, bias=0):
outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)
return infer_res
+
+
def get_det_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
det_res = []
k = 0
@@ -259,6 +264,7 @@ def get_det_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
det_res.append(dt_res)
return det_res
+
def cocoapi_eval(jsonfile,
style,
coco_gt=None,
@@ -298,6 +304,7 @@ def cocoapi_eval(jsonfile,
sys.stdout.flush()
return coco_eval.stats
+
class COCOMetric(paddle.metric.Metric):
def __init__(self, anno_file, **kwargs):
assert os.path.isfile(anno_file), \
@@ -313,7 +320,7 @@ def __init__(self, anno_file, **kwargs):
self.save_prediction_only = kwargs.get('save_prediction_only', False)
self.iou_type = kwargs.get('IouType', 'bbox')
self.reset()
-
+
def name(self):
return self.__class__.__name__
@@ -360,10 +367,11 @@ def log(self):
def get_results(self):
return self.eval_results
+
def _init_metrics(dataset):
# pass clsid2catid info to metric instance to avoid multiple loading
# annotation file
- clsid2catid = {v: k for k, v in dataset.catid2clsid.items()}
+ clsid2catid = {v: k for k, v in dataset.catid2clsid.items()}
    # when doing validation during training, the annotation file should be
    # taken from EvalReader instead of self.dataset (which is TrainReader)
@@ -381,23 +389,50 @@ def _init_metrics(dataset):
]
return _metrics
+
def _reset_metrics(_metrics):
for metric in _metrics:
metric.reset()
-def _eval_with_loader(model,dataset_dir,image_dir,anno_path):
+def _eval_with_loader(model, dataset_dir, image_dir, anno_path):
status = {}
_callbacks = [LogPrinter(model)]
- _compose_callback = ComposeCallback(_callbacks)
+ _compose_callback = ComposeCallback(_callbacks)
- dataset = COCODataSet(dataset_dir=dataset_dir, image_dir=image_dir,anno_path=anno_path)
+ dataset = COCODataSet(
+ dataset_dir=dataset_dir, image_dir=image_dir, anno_path=anno_path)
_eval_batch_sampler = paddle.io.BatchSampler(dataset, batch_size=1)
-
- sample_transforms = [{Decode: {}}, {Resize: {'target_size': [800, 1333], 'keep_ratio': True}}, {NormalizeImage: {'is_scale': True, 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}, {Permute: {}}]
- batch_transforms = [{PadMaskBatch:{'pad_to_stride': -1, 'return_pad_mask': True}}]
- loader = BaseDataLoader(sample_transforms, batch_transforms, batch_size=1, shuffle=False, drop_last=False, drop_empty=False)(dataset, 4, _eval_batch_sampler)
+ sample_transforms = [{
+ Decode: {}
+ }, {
+ Resize: {
+ 'target_size': [800, 1333],
+ 'keep_ratio': True
+ }
+ }, {
+ NormalizeImage: {
+ 'is_scale': True,
+ 'mean': [0.485, 0.456, 0.406],
+ 'std': [0.229, 0.224, 0.225]
+ }
+ }, {
+ Permute: {}
+ }]
+ batch_transforms = [{
+ PadMaskBatch: {
+ 'pad_to_stride': -1,
+ 'return_pad_mask': True
+ }
+ }]
+ loader = BaseDataLoader(
+ sample_transforms,
+ batch_transforms,
+ batch_size=1,
+ shuffle=False,
+ drop_last=False,
+ drop_empty=False)(dataset, 4, _eval_batch_sampler)
_metrics = _init_metrics(dataset=dataset)
@@ -430,6 +465,7 @@ def _eval_with_loader(model,dataset_dir,image_dir,anno_path):
    # reset metric states since the metric may be evaluated multiple times
_reset_metrics(_metrics)
-def evaluate(model,dataset_dir,image_dir,anno_path):
+
+def evaluate(model, dataset_dir, image_dir, anno_path):
with paddle.no_grad():
- _eval_with_loader(model,dataset_dir,image_dir,anno_path)
+ _eval_with_loader(model, dataset_dir, image_dir, anno_path)
diff --git a/transformer_courses/object_detection_DETR/imgs/000000014439.jpg b/transformer_courses/object_detection_DETR/imgs/000000014439.jpg
old mode 100644
new mode 100755
diff --git a/transformer_courses/object_detection_DETR/imgs/eval.png b/transformer_courses/object_detection_DETR/imgs/eval.png
old mode 100644
new mode 100755
diff --git a/transformer_courses/object_detection_DETR/main.py b/transformer_courses/object_detection_DETR/main.py
old mode 100644
new mode 100755
index 73097da7f..33d492dbf
--- a/transformer_courses/object_detection_DETR/main.py
+++ b/transformer_courses/object_detection_DETR/main.py
@@ -1,41 +1,105 @@
import argparse
-from models import ResNet, DETRTransformer, HungarianMatcher, DETRLoss, DETRHead, DETRBBoxPostProcess,DETR
+from models import ResNet, DETRTransformer, HungarianMatcher, DETRLoss, DETRHead, DETRBBoxPostProcess, DETR
from utils import load_weights
from train_model import train
from eval_model import evaluate
from test_model import get_test_images, predict
+
+
# build model.
def build_model():
- backbone = ResNet(depth=50, norm_type='bn', freeze_at=0, return_idx=[3], lr_mult_list=[0.0, 0.1, 0.1, 0.1], num_stages=4)
+ backbone = ResNet(
+ depth=50,
+ norm_type='bn',
+ freeze_at=0,
+ return_idx=[3],
+ lr_mult_list=[0.0, 0.1, 0.1, 0.1],
+ num_stages=4)
- transformer = DETRTransformer(num_queries=100, position_embed_type='sine', nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', hidden_dim=256, backbone_num_channels=2048)
+ transformer = DETRTransformer(
+ num_queries=100,
+ position_embed_type='sine',
+ nhead=8,
+ num_encoder_layers=6,
+ num_decoder_layers=6,
+ dim_feedforward=2048,
+ dropout=0.1,
+ activation='relu',
+ hidden_dim=256,
+ backbone_num_channels=2048)
- matcher = HungarianMatcher(matcher_coeff={'class': 1, 'bbox': 5, 'giou': 2}, use_focal_loss=False)
+ matcher = HungarianMatcher(
+ matcher_coeff={'class': 1,
+ 'bbox': 5,
+ 'giou': 2}, use_focal_loss=False)
- loss = DETRLoss(loss_coeff={'class': 1, 'bbox': 5, 'giou': 2, 'no_object': 0.1, 'mask': 1, 'dice': 1}, aux_loss=True, num_classes=80, use_focal_loss=False, matcher=matcher)
+ loss = DETRLoss(
+ loss_coeff={
+ 'class': 1,
+ 'bbox': 5,
+ 'giou': 2,
+ 'no_object': 0.1,
+ 'mask': 1,
+ 'dice': 1
+ },
+ aux_loss=True,
+ num_classes=80,
+ use_focal_loss=False,
+ matcher=matcher)
- detr_head = DETRHead(num_mlp_layers=3, num_classes=80, hidden_dim=256, use_focal_loss=False, nhead=8, fpn_dims=[], loss=loss)
+ detr_head = DETRHead(
+ num_mlp_layers=3,
+ num_classes=80,
+ hidden_dim=256,
+ use_focal_loss=False,
+ nhead=8,
+ fpn_dims=[],
+ loss=loss)
post_process = DETRBBoxPostProcess(num_classes=80, use_focal_loss=False)
- model = DETR(backbone=backbone,
- transformer=transformer,
- detr_head=detr_head,
- post_process=post_process)
+ model = DETR(
+ backbone=backbone,
+ transformer=transformer,
+ detr_head=detr_head,
+ post_process=post_process)
return model
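+
+# The assembled model follows the DETR pipeline built above: a ResNet-50
+# backbone feeds a transformer encoder/decoder with 100 object queries; the
+# DETR head is trained via Hungarian matching, and DETRBBoxPostProcess
+# converts the query outputs into scored boxes.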
+
+
# python main.py --mode='train' --dataset_dir='dataset/' --image_dir='train2017' --anno_path='annotations/instances_train2017.json'
# python main.py --mode='eval' --dataset_dir='dataset/' --image_dir='val2017' --anno_path='annotations/instances_val2017.json'
# python main.py --mode='test' --infer_img='test_imgs/000000014439.jpg'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
- parser.add_argument('--mode',type=str, default='train', help='choose mode for train or eval or test')
- parser.add_argument('--dataset_dir',type=str, default='dataset/', help='dir of datset')
- parser.add_argument('--image_dir',type=str, default='train2017/', help='dir of datset images')
- parser.add_argument('--anno_path',type=str, default='annotions/instances_train2017', help='json file')
- parser.add_argument('--infer_img',type=str, default='test_imgs/000000014439.jpg', help='test image')
- parser.add_argument('--pretrained_model',type=str,default='pretrained_model/detr',help='pretrained model')
-
+ parser.add_argument(
+ '--mode',
+ type=str,
+ default='train',
+        help='choose mode: train, eval or test')
+ parser.add_argument(
+        '--dataset_dir', type=str, default='dataset/', help='dir of dataset')
+ parser.add_argument(
+ '--image_dir',
+ type=str,
+ default='train2017/',
+        help='dir of dataset images')
+ parser.add_argument(
+ '--anno_path',
+ type=str,
+        default='annotations/instances_train2017.json',
+        help='path to the annotation json file')
+ parser.add_argument(
+ '--infer_img',
+ type=str,
+ default='test_imgs/000000014439.jpg',
+ help='test image')
+ parser.add_argument(
+ '--pretrained_model',
+ type=str,
+ default='pretrained_model/detr',
+ help='pretrained model')
+
args = parser.parse_args()
model = build_model()
@@ -43,19 +107,23 @@ def build_model():
if args.mode == 'train':
start_epoch = 0
end_epoch = 500
- train(model, start_epoch, end_epoch,
- dataset_dir=args.dataset_dir,
- image_dir=args.image_dir,
- anno_path=args.anno_path)
+ train(
+ model,
+ start_epoch,
+ end_epoch,
+ dataset_dir=args.dataset_dir,
+ image_dir=args.image_dir,
+ anno_path=args.anno_path)
if args.mode == 'eval':
        # model evaluation
        # load weights, then predict and evaluate
load_weights(model, args.pretrained_model)
- evaluate(model,
- dataset_dir=args.dataset_dir,
- image_dir=args.image_dir,
- anno_path=args.anno_path)
+ evaluate(
+ model,
+ dataset_dir=args.dataset_dir,
+ image_dir=args.image_dir,
+ anno_path=args.anno_path)
if args.mode == 'test':
        # model testing
@@ -68,4 +136,4 @@ def build_model():
model,
draw_threshold=0.5,
output_dir="output",
- anno_path=args.anno_path)
\ No newline at end of file
+ anno_path=args.anno_path)
diff --git a/transformer_courses/object_detection_DETR/models/__init__.py b/transformer_courses/object_detection_DETR/models/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/object_detection_DETR/models/callbacks.py b/transformer_courses/object_detection_DETR/models/callbacks.py
old mode 100644
new mode 100755
index 7658ff671..6a191891b
--- a/transformer_courses/object_detection_DETR/models/callbacks.py
+++ b/transformer_courses/object_detection_DETR/models/callbacks.py
@@ -3,6 +3,8 @@
import paddle.distributed as dist
from .save_model import save_model
+
+
class Callback(object):
def __init__(self, model):
self.model = model
@@ -18,6 +20,8 @@ def on_epoch_begin(self, status):
def on_epoch_end(self, status):
pass
+
+
class ComposeCallback(object):
def __init__(self, callbacks):
callbacks = [c for c in list(callbacks) if c is not None]
@@ -44,7 +48,7 @@ def on_epoch_end(self, status):
class LogPrinter(Callback):
- def __init__(self,model, batch_size=2):
+ def __init__(self, model, batch_size=2):
super(LogPrinter, self).__init__(model)
self.batch_size = batch_size
@@ -60,7 +64,7 @@ def on_step_end(self, status):
data_time = status['data_time']
epoches = 500
- batch_size = self.batch_size
+ batch_size = self.batch_size
logs = training_staus.log()
space_fmt = ':' + str(len(str(steps_per_epoch))) + 'd'
@@ -126,14 +130,12 @@ def on_epoch_end(self, status):
        print()
if mode == 'train':
end_epoch = 500
-
- if (
- epoch_id + 1
- ) % 1 == 0 or epoch_id == end_epoch - 1:
-
+
+ if (epoch_id + 1) % 1 == 0 or epoch_id == end_epoch - 1:
+
save_name = str(
epoch_id) if epoch_id != end_epoch - 1 else "model_final"
weight = self.weight
if weight:
- save_model(weight, self.optimizers, self.save_dir,
- save_name, epoch_id + 1)
\ No newline at end of file
+ save_model(weight, self.optimizers, self.save_dir, save_name,
+ epoch_id + 1)
diff --git a/transformer_courses/object_detection_DETR/models/detr.py b/transformer_courses/object_detection_DETR/models/detr.py
old mode 100644
new mode 100755
index 35eb29925..73ac33db3
--- a/transformer_courses/object_detection_DETR/models/detr.py
+++ b/transformer_courses/object_detection_DETR/models/detr.py
@@ -1,6 +1,7 @@
import paddle
import paddle.nn as nn
+
class DETR(nn.Layer):
def __init__(self,
backbone,
@@ -49,7 +50,7 @@ def build_inputs(self, data, input_def):
for i, k in enumerate(input_def):
inputs[k] = data[i]
return inputs
-
+
def model_arch(self, ):
pass
@@ -67,4 +68,4 @@ def get_pred(self):
"bbox": bbox_pred,
"bbox_num": bbox_num,
}
- return output
\ No newline at end of file
+ return output
diff --git a/transformer_courses/object_detection_DETR/models/detr_head.py b/transformer_courses/object_detection_DETR/models/detr_head.py
old mode 100644
new mode 100755
index 5e86f80eb..34484d0b8
--- a/transformer_courses/object_detection_DETR/models/detr_head.py
+++ b/transformer_courses/object_detection_DETR/models/detr_head.py
@@ -2,7 +2,8 @@
import paddle.nn as nn
import paddle.nn.functional as F
-from . initializer import linear_init_
+from .initializer import linear_init_
+
class MLP(nn.Layer):
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
@@ -13,7 +14,7 @@ def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
self._reset_parameters()
-
+
def _reset_parameters(self):
for l in self.layers:
linear_init_(l)
@@ -177,6 +178,7 @@ def __init__(self,
self.mask_head = MaskHeadFPNConv(hidden_dim + nhead, fpn_dims,
hidden_dim)
self._reset_parameters()
+
def _reset_parameters(self):
linear_init_(self.score_head)
@@ -238,4 +240,4 @@ def forward(self, out_transformer, body_feats, inputs=None):
masks=outputs_seg,
gt_mask=gt_mask)
else:
- return (outputs_bbox[-1], outputs_logit[-1], outputs_seg)
\ No newline at end of file
+ return (outputs_bbox[-1], outputs_logit[-1], outputs_seg)
diff --git a/transformer_courses/object_detection_DETR/models/hungarian_matcher.py b/transformer_courses/object_detection_DETR/models/hungarian_matcher.py
old mode 100644
new mode 100755
index 76e80d670..e4cc41479
--- a/transformer_courses/object_detection_DETR/models/hungarian_matcher.py
+++ b/transformer_courses/object_detection_DETR/models/hungarian_matcher.py
@@ -6,6 +6,7 @@
from utils.util import bbox_cxcywh_to_xyxy
from utils.util import GIoULoss
+
class HungarianMatcher(nn.Layer):
def __init__(self,
matcher_coeff={'class': 1,
@@ -52,7 +53,8 @@ def forward(self, boxes, logits, gt_bbox, gt_class):
# We flatten to compute the cost matrices in a batch
# [batch_size * num_queries, num_classes]
out_prob = F.sigmoid(logits.flatten(
- 0, 1)) if self.use_focal_loss else F.softmax(logits.flatten(0, 1))
+ 0, 1)) if self.use_focal_loss else F.softmax(
+ logits.flatten(0, 1))
# [batch_size * num_queries, 4]
out_bbox = boxes.flatten(0, 1)
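+        # Per DETR, the assignment minimizes the combined cost
+        # w_class * C_class + w_bbox * C_L1 + w_giou * C_giou,
+        # with the weights w_* taken from matcher_coeff.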
diff --git a/transformer_courses/object_detection_DETR/models/initializer.py b/transformer_courses/object_detection_DETR/models/initializer.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/object_detection_DETR/models/layers.py b/transformer_courses/object_detection_DETR/models/layers.py
old mode 100644
new mode 100755
index 686c73d8e..c695b1c1d
--- a/transformer_courses/object_detection_DETR/models/layers.py
+++ b/transformer_courses/object_detection_DETR/models/layers.py
@@ -29,6 +29,7 @@
from .initializer import xavier_uniform_, constant_
from utils.bbox_utils import delta2bbox
+
def _to_list(l):
if isinstance(l, (list, tuple)):
return list(l)
@@ -287,8 +288,6 @@ def forward(self, x):
return y
-
-
class AnchorGeneratorSSD(object):
def __init__(self,
steps=[8, 16, 32, 64, 100, 300],
@@ -422,8 +421,6 @@ def __call__(self, bbox_head_out, rois, im_shape, scale_factor):
return bboxes, scores
-
-
class MultiClassNMS(object):
def __init__(self,
score_threshold=.05,
@@ -467,8 +464,6 @@ def __call__(self, bboxes, score, background_label=-1):
return ops.multiclass_nms(bboxes, score, **kwargs)
-
-
class MatrixNMS(object):
__append_doc__ = True
@@ -505,8 +500,6 @@ def __call__(self, bbox, score, *args):
normalized=self.normalized)
-
-
class YOLOBox(object):
__shared__ = ['num_classes']
@@ -544,8 +537,6 @@ def __call__(self,
return yolo_boxes, yolo_scores
-
-
class SSDBox(object):
def __init__(self, is_normalized=True):
self.is_normalized = is_normalized
@@ -594,8 +585,6 @@ def __call__(self,
return boxes, scores
-
-
class AnchorGrid(object):
"""Generate anchor grid
@@ -682,8 +671,6 @@ def __call__(self):
return self._anchor_vars
-
-
class FCOSBox(object):
__shared__ = ['num_classes']
@@ -774,7 +761,6 @@ def __call__(self, locations, cls_logits, bboxes_reg, centerness,
return pred_boxes, pred_scores
-
class TTFBox(object):
__shared__ = ['down_ratio']
@@ -875,8 +861,6 @@ def __call__(self, hm, wh, im_shape, scale_factor):
return results, results_num
-
-
class JDEBox(object):
__shared__ = ['num_classes']
@@ -973,8 +957,6 @@ def __call__(self, yolo_head_out, anchors):
return boxes_idx_over_conf_thr, yolo_boxes_scores
-
-
class MaskMatrixNMS(object):
"""
Matrix NMS for multi-class masks.
diff --git a/transformer_courses/object_detection_DETR/models/loss.py b/transformer_courses/object_detection_DETR/models/loss.py
old mode 100644
new mode 100755
index 7091f56f1..fad450b3b
--- a/transformer_courses/object_detection_DETR/models/loss.py
+++ b/transformer_courses/object_detection_DETR/models/loss.py
@@ -5,6 +5,7 @@
from utils.util import bbox_cxcywh_to_xyxy
from utils.util import GIoULoss
+
class DETRLoss(nn.Layer):
def __init__(self,
num_classes=80,
@@ -137,7 +138,8 @@ def _get_loss_aux(self, boxes, logits, gt_bbox, gt_class, bg_index,
def _get_index_updates(self, num_query_objects, target, match_indices):
batch_idx = paddle.concat([
- paddle.full_like(src, i) for i, (src, _) in enumerate(match_indices)
+ paddle.full_like(src, i)
+ for i, (src, _) in enumerate(match_indices)
])
src_idx = paddle.concat([src for (src, _) in match_indices])
src_idx += (batch_idx * num_query_objects)
@@ -150,12 +152,14 @@ def _get_index_updates(self, num_query_objects, target, match_indices):
def _get_src_target_assign(self, src, target, match_indices):
src_assign = paddle.concat([
paddle.gather(
- t, I, axis=0) if len(I) > 0 else paddle.zeros([0, t.shape[-1]])
+ t, I, axis=0)
+ if len(I) > 0 else paddle.zeros([0, t.shape[-1]])
for t, (I, _) in zip(src, match_indices)
])
target_assign = paddle.concat([
paddle.gather(
- t, J, axis=0) if len(J) > 0 else paddle.zeros([0, t.shape[-1]])
+ t, J, axis=0)
+ if len(J) > 0 else paddle.zeros([0, t.shape[-1]])
for t, (_, J) in zip(target, match_indices)
])
return src_assign, target_assign
@@ -202,4 +206,4 @@ def forward(self,
self._get_loss_aux(boxes[:-1], logits[:-1], gt_bbox, gt_class,
self.num_classes, num_gts))
- return total_loss
\ No newline at end of file
+ return total_loss
diff --git a/transformer_courses/object_detection_DETR/models/ops.py b/transformer_courses/object_detection_DETR/models/ops.py
old mode 100644
new mode 100755
index 6fa4d8ad3..286a109ba
--- a/transformer_courses/object_detection_DETR/models/ops.py
+++ b/transformer_courses/object_detection_DETR/models/ops.py
@@ -693,12 +693,13 @@ def yolo_box(
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
- raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
+ raise TypeError(
+            "Attr conf_thresh of yolo_box must be a float number")
if in_dygraph_mode():
attrs = ('anchors', anchors, 'class_num', class_num, 'conf_thresh',
- conf_thresh, 'downsample_ratio', downsample_ratio, 'clip_bbox',
- clip_bbox, 'scale_x_y', scale_x_y)
+ conf_thresh, 'downsample_ratio', downsample_ratio,
+ 'clip_bbox', clip_bbox, 'scale_x_y', scale_x_y)
boxes, scores = core.ops.yolo_box(x, origin_shape, *attrs)
return boxes, scores
else:
@@ -980,8 +981,8 @@ class number
score_threshold, 'nms_top_k', nms_top_k, 'nms_threshold',
nms_threshold, 'keep_top_k', keep_top_k, 'nms_eta', nms_eta,
'normalized', normalized)
- output, index, nms_rois_num = core.ops.multiclass_nms3(bboxes, scores,
- rois_num, *attrs)
+ output, index, nms_rois_num = core.ops.multiclass_nms3(
+ bboxes, scores, rois_num, *attrs)
if not return_index:
index = None
return output, nms_rois_num, index
@@ -1118,10 +1119,10 @@ def matrix_nms(bboxes,
if in_dygraph_mode():
attrs = ('background_label', background_label, 'score_threshold',
- score_threshold, 'post_threshold', post_threshold, 'nms_top_k',
- nms_top_k, 'gaussian_sigma', gaussian_sigma, 'use_gaussian',
- use_gaussian, 'keep_top_k', keep_top_k, 'normalized',
- normalized)
+ score_threshold, 'post_threshold', post_threshold,
+ 'nms_top_k', nms_top_k, 'gaussian_sigma', gaussian_sigma,
+ 'use_gaussian', use_gaussian, 'keep_top_k', keep_top_k,
+ 'normalized', normalized)
out, index, rois_num = core.ops.matrix_nms(bboxes, scores, *attrs)
if not return_index:
index = None
@@ -1505,9 +1506,9 @@ def generate_proposals(scores,
"""
if in_dygraph_mode():
assert return_rois_num, "return_rois_num should be True in dygraph mode."
- attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n,
- 'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta,
- 'pixel_offset', pixel_offset)
+ attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN',
+ post_nms_top_n, 'nms_thresh', nms_thresh, 'min_size',
+ min_size, 'eta', eta, 'pixel_offset', pixel_offset)
rpn_rois, rpn_roi_probs, rpn_rois_num = core.ops.generate_proposals_v2(
scores, bbox_deltas, im_shape, anchors, variances, *attrs)
return rpn_rois, rpn_roi_probs, rpn_rois_num
@@ -1577,7 +1578,10 @@ def sigmoid_cross_entropy_with_logits(input,
return output
-def smooth_l1(input, label, inside_weight=None, outside_weight=None,
+def smooth_l1(input,
+ label,
+ inside_weight=None,
+ outside_weight=None,
sigma=None):
input_new = paddle.multiply(input, inside_weight)
label_new = paddle.multiply(label, inside_weight)
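+    # Classic Fast R-CNN smooth L1 on d = input - label (elementwise):
+    # f(d) = 0.5 * (sigma * d)^2 if |d| < 1 / sigma^2, else |d| - 0.5 / sigma^2.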
diff --git a/transformer_courses/object_detection_DETR/models/optimizer.py b/transformer_courses/object_detection_DETR/models/optimizer.py
old mode 100644
new mode 100755
index cc0895743..c5fddf3a9
--- a/transformer_courses/object_detection_DETR/models/optimizer.py
+++ b/transformer_courses/object_detection_DETR/models/optimizer.py
@@ -1,6 +1,7 @@
import paddle.nn as nn
import paddle.optimizer as optimizer
+
class PiecewiseDecay(object):
"""
Multi step learning rate decay
@@ -50,6 +51,7 @@ def __call__(self,
return optimizer.lr.PiecewiseDecay(boundary, value)
+
class LearningRate(object):
"""
Learning Rate configuration
@@ -58,16 +60,16 @@ class LearningRate(object):
base_lr (float): base learning rate
schedulers (list): learning rate schedulers
"""
- def __init__(self,
- base_lr=0.01,
- schedulers=[PiecewiseDecay()]):
+
+ def __init__(self, base_lr=0.01, schedulers=[PiecewiseDecay()]):
super(LearningRate, self).__init__()
self.base_lr = base_lr
self.schedulers = schedulers
-
+
def __call__(self, step_per_epoch):
- return self.schedulers(base_lr=self.base_lr,
- step_per_epoch=step_per_epoch)
+ return self.schedulers(
+ base_lr=self.base_lr, step_per_epoch=step_per_epoch)
+
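+# Example with the values used by train_model.py in this same patch:
+# PiecewiseDecay(gamma=0.1, milestones=[400]) on base_lr=0.0001 keeps the
+# learning rate at 1e-4 until epoch 400, then decays it to 1e-5.
+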
class OptimizerBuilder():
"""
@@ -83,7 +85,7 @@ def __init__(self,
regularizer={'type': 'L2',
'factor': .0001},
optimizers={'type': 'Momentum',
- 'momentum': .9}):
+ 'momentum': .9}):
self.clip_grad_by_norm = clip_grad_by_norm
self.regularizer = regularizer
self.optimizers = optimizers
@@ -110,4 +112,4 @@ def __call__(self, learning_rate, params=None):
return op(learning_rate=learning_rate,
parameters=params,
grad_clip=grad_clip,
- **optim_args)
\ No newline at end of file
+ **optim_args)
diff --git a/transformer_courses/object_detection_DETR/models/positional_encoding.py b/transformer_courses/object_detection_DETR/models/positional_encoding.py
old mode 100644
new mode 100755
index 3ce73d9e5..2da162adc
--- a/transformer_courses/object_detection_DETR/models/positional_encoding.py
+++ b/transformer_courses/object_detection_DETR/models/positional_encoding.py
@@ -2,6 +2,7 @@
import paddle
import paddle.nn as nn
+
class PositionEmbedding(nn.Layer):
def __init__(self,
num_pos_feats=128,
@@ -80,4 +81,3 @@ def forward(self, mask):
return pos
else:
            raise ValueError(f"embed_type {self.embed_type} is not supported")
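+
+
+# The 'sine' variant follows the standard transformer positional encoding,
+# PE(pos, 2i) = sin(pos / T^(2i/d)) and PE(pos, 2i+1) = cos(pos / T^(2i/d)),
+# applied to row and column positions separately as in DETR (T is the
+# temperature, conventionally 10000).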
-
diff --git a/transformer_courses/object_detection_DETR/models/post_process.py b/transformer_courses/object_detection_DETR/models/post_process.py
old mode 100644
new mode 100755
index f584dbc16..493b6477e
--- a/transformer_courses/object_detection_DETR/models/post_process.py
+++ b/transformer_courses/object_detection_DETR/models/post_process.py
@@ -3,6 +3,7 @@
from utils.util import bbox_cxcywh_to_xyxy
+
class DETRBBoxPostProcess(object):
def __init__(self,
num_classes=80,
diff --git a/transformer_courses/object_detection_DETR/models/save_model.py b/transformer_courses/object_detection_DETR/models/save_model.py
old mode 100644
new mode 100755
index cd307fcb7..6cb59e386
--- a/transformer_courses/object_detection_DETR/models/save_model.py
+++ b/transformer_courses/object_detection_DETR/models/save_model.py
@@ -1,6 +1,7 @@
import os
import paddle
+
def save_model(model, optimizers, save_dir, save_name, last_epoch):
"""
    save the model and optimizer state to disk.
@@ -27,4 +28,4 @@ def save_model(model, optimizers, save_dir, save_name, last_epoch):
state_dict = optimizers.state_dict()
state_dict['last_epoch'] = last_epoch
paddle.save(state_dict, save_path + ".pdopt")
- print("Save checkpoint: {}".format(save_dir))
\ No newline at end of file
+ print("Save checkpoint: {}".format(save_dir))
diff --git a/transformer_courses/object_detection_DETR/models/transformer.py b/transformer_courses/object_detection_DETR/models/transformer.py
old mode 100644
new mode 100755
index 9b3b4efac..09e71e07c
--- a/transformer_courses/object_detection_DETR/models/transformer.py
+++ b/transformer_courses/object_detection_DETR/models/transformer.py
@@ -5,11 +5,13 @@
from .positional_encoding import PositionEmbedding
from .layers import MultiHeadAttention, _convert_attention_mask
-from . initializer import linear_init_, conv_init_, xavier_uniform_, normal_
+from .initializer import linear_init_, conv_init_, xavier_uniform_, normal_
+
def _get_clones(module, N):
return nn.LayerList([copy.deepcopy(module) for _ in range(N)])
+
# first, define a single encoder block
class TransformerEncoderLayer(nn.Layer):
def __init__(self,
@@ -69,6 +71,7 @@ def forward(self, src, src_mask=None, pos_embed=None):
src = self.norm2(src)
return src
+
# the DETR encoder is composed of multiple encoder blocks
class TransformerEncoder(nn.Layer):
def __init__(self, encoder_layer, num_layers, norm=None):
@@ -89,6 +92,7 @@ def forward(self, src, src_mask=None, pos_embed=None):
return output
+
# first, define a single decoder block
class TransformerDecoderLayer(nn.Layer):
def __init__(self,
@@ -167,6 +171,7 @@ def forward(self,
tgt = self.norm3(tgt)
return tgt
+
# the DETR decoder is composed of multiple decoder blocks
class TransformerDecoder(nn.Layer):
def __init__(self,
@@ -211,6 +216,7 @@ def forward(self,
return output.unsqueeze(0)
+
# the encoder and decoder together form the transformer part of DETR
class DETRTransformer(nn.Layer):
def __init__(self,
diff --git a/transformer_courses/object_detection_DETR/output/000000014439.jpg b/transformer_courses/object_detection_DETR/output/000000014439.jpg
old mode 100644
new mode 100755
diff --git a/transformer_courses/object_detection_DETR/requirements.txt b/transformer_courses/object_detection_DETR/requirements.txt
old mode 100644
new mode 100755
diff --git a/transformer_courses/object_detection_DETR/test_imgs/000000014439.jpg b/transformer_courses/object_detection_DETR/test_imgs/000000014439.jpg
old mode 100644
new mode 100755
diff --git a/transformer_courses/object_detection_DETR/test_model.py b/transformer_courses/object_detection_DETR/test_model.py
old mode 100644
new mode 100755
index c481125d5..e7bba6ad0
--- a/transformer_courses/object_detection_DETR/test_model.py
+++ b/transformer_courses/object_detection_DETR/test_model.py
@@ -8,6 +8,7 @@
from data.operators import *
from eval_model import get_categories, get_infer_results
+
class ImageFolder(Dataset):
def __init__(self,
dataset_dir=None,
@@ -74,7 +75,6 @@ def set_transform(self, transform):
def set_epoch(self, epoch_id):
self._epoch = epoch_id
-
def parse_dataset(self, ):
if not self.roidbs:
self.roidbs = self._load_images()
@@ -119,6 +119,8 @@ def get_imid2path(self):
def set_images(self, images):
self.image_dir = images
self.roidbs = self._load_images()
+
+
def _is_valid_file(f, extensions=('.jpg', '.jpeg', '.png', '.bmp')):
return f.lower().endswith(extensions)
@@ -135,6 +137,7 @@ def _make_dataset(dir):
images.append(path)
return images
+
def draw_bbox(image, bbox_res, im_id, catid2name, threshold=0.5):
"""
Draw bbox on image
@@ -160,11 +163,10 @@ def draw_bbox(image, bbox_res, im_id, catid2name, threshold=0.5):
ymax = ymin + h
draw.line(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
- (xmin, ymin)],
+ (xmin, ymin)],
width=2,
fill=color)
-
# draw label
text = "{} {:.2f}".format(catid2name[catid], score)
tw, th = draw.textsize(text)
@@ -173,6 +175,8 @@ def draw_bbox(image, bbox_res, im_id, catid2name, threshold=0.5):
draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
return image
+
+
def colormap(rgb=False):
"""
Get colormap
@@ -208,19 +212,45 @@ def colormap(rgb=False):
color_list = color_list[:, ::-1]
return color_list
+
def predict(images,
model,
draw_threshold=0.5,
output_dir='output',
- anno_path=None):
- status = {}
- dataset = ImageFolder(anno_path=anno_path)
+ anno_path=None):
+ status = {}
+ dataset = ImageFolder(anno_path=anno_path)
dataset.set_images(images)
- sample_transforms = [{Decode: {}}, {Resize: {'target_size': [800, 1333], 'keep_ratio': True}}, {NormalizeImage: {'is_scale': True, 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}, {Permute: {}}]
- batch_transforms = [{PadMaskBatch: {'pad_to_stride': -1, 'return_pad_mask': True}}]
- loader = BaseDataLoader(sample_transforms, batch_transforms, batch_size=1, shuffle=False, drop_last=False)(dataset, 0)
-
+ sample_transforms = [{
+ Decode: {}
+ }, {
+ Resize: {
+ 'target_size': [800, 1333],
+ 'keep_ratio': True
+ }
+ }, {
+ NormalizeImage: {
+ 'is_scale': True,
+ 'mean': [0.485, 0.456, 0.406],
+ 'std': [0.229, 0.224, 0.225]
+ }
+ }, {
+ Permute: {}
+ }]
+ batch_transforms = [{
+ PadMaskBatch: {
+ 'pad_to_stride': -1,
+ 'return_pad_mask': True
+ }
+ }]
+ loader = BaseDataLoader(
+ sample_transforms,
+ batch_transforms,
+ batch_size=1,
+ shuffle=False,
+ drop_last=False)(dataset, 0)
+
imid2path = dataset.get_imid2path()
anno_file = dataset.get_anno()
@@ -252,9 +282,11 @@ def predict(images,
status['original_image'] = np.array(image.copy())
end = start + bbox_num[i]
- bbox_res = batch_res['bbox'][start:end] if 'bbox' in batch_res else None
+ bbox_res = batch_res['bbox'][start:
+ end] if 'bbox' in batch_res else None
if bbox_res is not None:
- image = draw_bbox(image, bbox_res,int(im_id), catid2name, draw_threshold)
+ image = draw_bbox(image, bbox_res,
+ int(im_id), catid2name, draw_threshold)
status['result_image'] = np.array(image.copy())
# save image with detection
@@ -266,7 +298,9 @@ def predict(images,
print("Detection bbox results save in {}".format(save_name))
image.save(save_name, quality=95)
start = end
-def get_test_images(infer_img,infer_dir=None):
+
+
+def get_test_images(infer_img, infer_dir=None):
"""
Get image path list in TEST mode
"""
@@ -294,4 +328,4 @@ def get_test_images(infer_img,infer_dir=None):
assert len(images) > 0, "no image found in {}".format(infer_dir)
print("Found {} inference images in total.".format(len(images)))
- return images
\ No newline at end of file
+ return images
diff --git a/transformer_courses/object_detection_DETR/train_model.py b/transformer_courses/object_detection_DETR/train_model.py
old mode 100644
new mode 100755
index 708cb8fd0..85dc079bf
--- a/transformer_courses/object_detection_DETR/train_model.py
+++ b/transformer_courses/object_detection_DETR/train_model.py
@@ -85,49 +85,111 @@ def log(self, extras=None):
strs.append("{}: {}".format(k, str(v)))
return self.delimiter.join(strs)
-def train(model, start_epoch, epoch,dataset_dir,image_dir,anno_path):
+
+def train(model, start_epoch, epoch, dataset_dir, image_dir, anno_path):
status = {}
batch_size = 16
_nranks = dist.get_world_size()
_local_rank = dist.get_rank()
    # load the training set
- dataset = COCODataSet(dataset_dir=dataset_dir, image_dir=image_dir,anno_path=anno_path,data_fields=['image', 'gt_bbox', 'gt_class', 'is_crowd'])
- sample_transforms = [{Decode: {}}, {RandomFlip: {'prob': 0.5}}, {RandomSelect: {'transforms1': [{RandomShortSideResize: {'short_side_sizes': [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800], 'max_size': 1333}}], 'transforms2': [{RandomShortSideResize: {'short_side_sizes': [400, 500, 600]}}, {RandomSizeCrop: {'min_size': 384, 'max_size': 600}}, {RandomShortSideResize: {'short_side_sizes': [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800], 'max_size': 1333}}]}}, {NormalizeImage: {'is_scale': True, 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}, {NormalizeBox: {}}, {BboxXYXY2XYWH: {}}, {Permute: {}}]
- batch_transforms = [{PadMaskBatch: {'pad_to_stride': -1, 'return_pad_mask': True}}]
- loader = BaseDataLoader(sample_transforms, batch_transforms, batch_size=2, shuffle=True, drop_last=True,collate_batch=False, use_shared_memory=False)(
- dataset, 0)
+ dataset = COCODataSet(
+ dataset_dir=dataset_dir,
+ image_dir=image_dir,
+ anno_path=anno_path,
+ data_fields=['image', 'gt_bbox', 'gt_class', 'is_crowd'])
+ sample_transforms = [{
+ Decode: {}
+ }, {
+ RandomFlip: {
+ 'prob': 0.5
+ }
+ }, {
+ RandomSelect: {
+ 'transforms1': [{
+ RandomShortSideResize: {
+ 'short_side_sizes':
+ [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800],
+ 'max_size': 1333
+ }
+ }],
+ 'transforms2': [{
+ RandomShortSideResize: {
+ 'short_side_sizes': [400, 500, 600]
+ }
+ }, {
+ RandomSizeCrop: {
+ 'min_size': 384,
+ 'max_size': 600
+ }
+ }, {
+ RandomShortSideResize: {
+ 'short_side_sizes':
+ [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800],
+ 'max_size': 1333
+ }
+ }]
+ }
+ }, {
+ NormalizeImage: {
+ 'is_scale': True,
+ 'mean': [0.485, 0.456, 0.406],
+ 'std': [0.229, 0.224, 0.225]
+ }
+ }, {
+ NormalizeBox: {}
+ }, {
+ BboxXYXY2XYWH: {}
+ }, {
+ Permute: {}
+ }]
+ batch_transforms = [{
+ PadMaskBatch: {
+ 'pad_to_stride': -1,
+ 'return_pad_mask': True
+ }
+ }]
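+    # RandomSelect applies either transforms1 (a single multi-scale resize)
+    # or transforms2 (resize, then RandomSizeCrop, then resize again) to each
+    # sample, which is the augmentation recipe from the DETR paper.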
+ loader = BaseDataLoader(
+ sample_transforms,
+ batch_transforms,
+ batch_size=2,
+ shuffle=True,
+ drop_last=True,
+ collate_batch=False,
+ use_shared_memory=False)(dataset, 0)
# build optimizer in train mode
steps_per_epoch = len(loader)
-
    # set up the learning rate and the optimizer
- schedulers = PiecewiseDecay(gamma=0.1,milestones=[400],use_warmup=False)
+ schedulers = PiecewiseDecay(gamma=0.1, milestones=[400], use_warmup=False)
lr_ = LearningRate(base_lr=0.0001, schedulers=schedulers)
- optimizer_ = OptimizerBuilder(clip_grad_by_norm=0.1, regularizer=False, optimizers={'type': 'AdamW', 'weight_decay': 0.0001})
+ optimizer_ = OptimizerBuilder(
+ clip_grad_by_norm=0.1,
+ regularizer=False,
+ optimizers={'type': 'AdamW',
+ 'weight_decay': 0.0001})
lr = lr_(steps_per_epoch)
- optimizers = optimizer_(lr,model.parameters())
+ optimizers = optimizer_(lr, model.parameters())
# initial default callbacks
- _callbacks = [LogPrinter(model,batch_size), Checkpointer(model,optimizers)]
+ _callbacks = [
+ LogPrinter(model, batch_size), Checkpointer(model, optimizers)
+ ]
_compose_callback = ComposeCallback(_callbacks)
-
if _nranks > 1:
model = paddle.DataParallel(model, find_unused_parameters=False)
-
status.update({
'epoch_id': start_epoch,
'step_id': 0,
'steps_per_epoch': len(loader)
})
-
+
status['batch_time'] = SmoothedValue(20, fmt='{avg:.4f}')
status['data_time'] = SmoothedValue(20, fmt='{avg:.4f}')
status['training_staus'] = TrainingStats(20)
-
for epoch_id in range(start_epoch, epoch):
status['mode'] = 'train'
status['epoch_id'] = epoch_id
@@ -159,4 +221,4 @@ def train(model, start_epoch, epoch,dataset_dir,image_dir,anno_path):
_compose_callback.on_step_end(status)
iter_tic = time.time()
- _compose_callback.on_epoch_end(status)
\ No newline at end of file
+ _compose_callback.on_epoch_end(status)
diff --git a/transformer_courses/object_detection_DETR/utils/__init__.py b/transformer_courses/object_detection_DETR/utils/__init__.py
old mode 100644
new mode 100755
diff --git a/transformer_courses/object_detection_DETR/utils/bbox_utils.py b/transformer_courses/object_detection_DETR/utils/bbox_utils.py
old mode 100644
new mode 100755
index aa55538f3..324edaab7
--- a/transformer_courses/object_detection_DETR/utils/bbox_utils.py
+++ b/transformer_courses/object_detection_DETR/utils/bbox_utils.py
@@ -171,7 +171,8 @@ def decode_yolo(box, anchor, downsample_ratio):
"""
x, y, w, h = box
na, grid_h, grid_w = x.shape[1:4]
- grid = make_grid(grid_h, grid_w, x.dtype).reshape((1, 1, grid_h, grid_w, 2))
+ grid = make_grid(grid_h, grid_w, x.dtype).reshape(
+ (1, 1, grid_h, grid_w, 2))
x1 = (x + grid[:, :, :, :, 0:1]) / grid_w
y1 = (y + grid[:, :, :, :, 1:2]) / grid_h
@@ -250,7 +251,8 @@ def bbox_iou(box1, box2, giou=False, diou=False, ciou=False, eps=1e-9):
# convex diagonal squared
c2 = cw**2 + ch**2 + eps
# center distance
- rho2 = ((px1 + px2 - gx1 - gx2)**2 + (py1 + py2 - gy1 - gy2)**2) / 4
+ rho2 = (
+ (px1 + px2 - gx1 - gx2)**2 + (py1 + py2 - gy1 - gy2)**2) / 4
if diou:
return iou - rho2 / c2
else:
@@ -365,8 +367,8 @@ def rbox2delta(proposals, gt, means=[0, 0, 0, 0, 0], stds=[1, 1, 1, 1, 1]):
coord = gt[..., 0:2] - proposals[..., 0:2]
dx = (np.cos(proposals[..., 4]) * coord[..., 0] + np.sin(proposals[..., 4])
* coord[..., 1]) / proposals_widths
- dy = (-np.sin(proposals[..., 4]) * coord[..., 0] + np.cos(proposals[..., 4])
- * coord[..., 1]) / proposals_heights
+ dy = (-np.sin(proposals[..., 4]) * coord[..., 0] +
+ np.cos(proposals[..., 4]) * coord[..., 1]) / proposals_heights
dw = np.log(gt_widths / proposals_widths)
dh = np.log(gt_heights / proposals_heights)
da = (gt_angle - proposals_angle)
@@ -457,7 +459,8 @@ def norm_angle(angle, range=[-np.pi / 4, np.pi]):
def cal_line_length(point1, point2):
import math
return math.sqrt(
- math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2))
+ math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1],
+ 2))
def get_best_begin_point_single(coordinate):
diff --git a/transformer_courses/object_detection_DETR/utils/load_model.py b/transformer_courses/object_detection_DETR/utils/load_model.py
old mode 100644
new mode 100755
index 595f6e054..aaa3676e7
--- a/transformer_courses/object_detection_DETR/utils/load_model.py
+++ b/transformer_courses/object_detection_DETR/utils/load_model.py
@@ -2,6 +2,7 @@
import numpy as np
import paddle
+
def match_state_dict(model_state_dict, weight_state_dict):
"""
Match between the model state dict and pretrained weight state dict.
@@ -47,11 +48,10 @@ def match(a, b):
model_value_shape = list(model_state_dict[model_key].shape)
if list(weight_value.shape) != model_value_shape:
- print(
- 'The shape {} in pretrained weight {} is unmatched with '
- 'the shape {} in model {}. And the weight {} will not be '
- 'loaded'.format(weight_value.shape, weight_key,
- model_value_shape, model_key, weight_key))
+ print('The shape {} in pretrained weight {} is unmatched with '
+ 'the shape {} in model {}. And the weight {} will not be '
+ 'loaded'.format(weight_value.shape, weight_key,
+ model_value_shape, model_key, weight_key))
continue
assert model_key not in result_state_dict
@@ -64,12 +64,14 @@ def match(a, b):
matched_keys[weight_key] = model_key
return result_state_dict
+
def _strip_postfix(path):
path, ext = os.path.splitext(path)
assert ext in ['', '.pdparams', '.pdopt', '.pdmodel'], \
"Unknown postfix {} from weights".format(ext)
return path
+
def load_pretrain_weight(model, pretrain_weight):
path = _strip_postfix(pretrain_weight)
if not (os.path.isdir(path) or os.path.isfile(path) or
@@ -88,7 +90,8 @@ def load_pretrain_weight(model, pretrain_weight):
model.set_dict(param_state_dict)
print('Finish loading model weights: {}'.format(weights_path))
+
def load_weights(model, weights):
start_epoch = 0
load_pretrain_weight(model, weights)
- print("Load weights {} to start training".format(weights))
\ No newline at end of file
+ print("Load weights {} to start training".format(weights))
diff --git a/transformer_courses/object_detection_DETR/utils/util.py b/transformer_courses/object_detection_DETR/utils/util.py
old mode 100644
new mode 100755
index 8c723ac3b..6241c98c9
--- a/transformer_courses/object_detection_DETR/utils/util.py
+++ b/transformer_courses/object_detection_DETR/utils/util.py
@@ -1,11 +1,13 @@
import paddle
import paddle.nn.functional as F
+
def bbox_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
return paddle.stack(b, axis=-1)
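+
+# Worked example for bbox_cxcywh_to_xyxy: the box (cx, cy, w, h) =
+# (0.5, 0.5, 0.2, 0.4) maps to (x0, y0, x1, y1) = (0.4, 0.3, 0.6, 0.7),
+# i.e. (cx - w/2, cy - h/2, cx + w/2, cy + h/2).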
+
def sigmoid_focal_loss(logit, label, normalizer=1.0, alpha=0.25, gamma=2.0):
prob = F.sigmoid(logit)
ce_loss = F.binary_cross_entropy_with_logits(logit, label, reduction="none")
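+    # Focal loss (https://arxiv.org/abs/1708.02002) reweights this BCE term:
+    # FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t), and the result is
+    # scaled by 1 / normalizer.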
@@ -23,6 +25,7 @@ def inverse_sigmoid(x, eps=1e-6):
x = x.clip(min=0., max=1.)
return paddle.log(x / (1 - x + eps) + eps)
+
class GIoULoss(object):
"""
Generalized Intersection over Union, see https://arxiv.org/abs/1902.09630
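
    GIoU(A, B) = IoU(A, B) - |C \ (A U B)| / |C|, where C is the smallest
    axis-aligned box enclosing both A and B.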
@@ -93,4 +96,4 @@ def __call__(self, pbox, gbox, iou_weight=1., loc_reweight=None):
loss = paddle.sum(giou * iou_weight)
else:
loss = paddle.mean(giou * iou_weight)
- return loss * self.loss_weight
\ No newline at end of file
+ return loss * self.loss_weight
diff --git a/transformer_courses/reading_comprehension_based_on_ernie/README.md b/transformer_courses/reading_comprehension_based_on_ernie/README.md
old mode 100644
new mode 100755
index 2fad0409a..aff829813
--- a/transformer_courses/reading_comprehension_based_on_ernie/README.md
+++ b/transformer_courses/reading_comprehension_based_on_ernie/README.md
@@ -59,5 +59,5 @@ export CUDA_VISIBLE_DEVICES=0
python ./evaluate.py --model_path ./ernie_rc.pdparams \
--max_seq_length 512 \
- --batch_size 12
-```
\ No newline at end of file
+ --batch_size 12
+```
diff --git a/transformer_courses/reading_comprehension_based_on_ernie/data_processor.py b/transformer_courses/reading_comprehension_based_on_ernie/data_processor.py
old mode 100644
new mode 100755
index 39a20e354..e2bc2db5c
--- a/transformer_courses/reading_comprehension_based_on_ernie/data_processor.py
+++ b/transformer_courses/reading_comprehension_based_on_ernie/data_processor.py
@@ -5,7 +5,7 @@
from paddlenlp.metrics.squad import squad_evaluate, compute_prediction
-def prepare_train_features(examples,tokenizer,doc_stride,max_seq_length):
+def prepare_train_features(examples, tokenizer, doc_stride, max_seq_length):
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
@@ -13,10 +13,7 @@ def prepare_train_features(examples,tokenizer,doc_stride,max_seq_length):
questions = [examples[i]['question'] for i in range(len(examples))]
tokenized_examples = tokenizer(
- questions,
- contexts,
- stride=doc_stride,
- max_seq_len=max_seq_length)
+ questions, contexts, stride=doc_stride, max_seq_len=max_seq_length)
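+    # For example, with max_seq_length=512 a long context yields several
+    # overlapping features; doc_stride sets how many tokens consecutive
+    # windows share.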
# Let's label those examples!
for i, tokenized_example in enumerate(tokenized_examples):
@@ -70,7 +67,9 @@ def prepare_train_features(examples,tokenizer,doc_stride,max_seq_length):
return tokenized_examples
-def prepare_validation_features(examples,tokenizer,doc_stride,max_seq_length):
+
+def prepare_validation_features(examples, tokenizer, doc_stride,
+ max_seq_length):
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
@@ -78,10 +77,7 @@ def prepare_validation_features(examples,tokenizer,doc_stride,max_seq_length):
questions = [examples[i]['question'] for i in range(len(examples))]
tokenized_examples = tokenizer(
- questions,
- contexts,
- stride=doc_stride,
- max_seq_len=max_seq_length)
+ questions, contexts, stride=doc_stride, max_seq_len=max_seq_length)
# For validation, there is no need to compute start and end positions
for i, tokenized_example in enumerate(tokenized_examples):
@@ -100,4 +96,3 @@ def prepare_validation_features(examples,tokenizer,doc_stride,max_seq_length):
]
return tokenized_examples
-
diff --git a/transformer_courses/reading_comprehension_based_on_ernie/evaluate.py b/transformer_courses/reading_comprehension_based_on_ernie/evaluate.py
old mode 100644
new mode 100755
index f9acc573d..ae9110352
--- a/transformer_courses/reading_comprehension_based_on_ernie/evaluate.py
+++ b/transformer_courses/reading_comprehension_based_on_ernie/evaluate.py
@@ -12,23 +12,26 @@
from data_processor import prepare_train_features, prepare_validation_features
-
def evaluate(args, is_test=True):
    # load the model
model_state = paddle.load(args.model_path)
- model = ErnieForQuestionAnswering.from_pretrained(args.model_name)
+ model = ErnieForQuestionAnswering.from_pretrained(args.model_name)
model.load_dict(model_state)
model.eval()
    # load the data
- train_ds, dev_ds, test_ds = load_dataset('dureader_robust', splits=('train', 'dev', 'test'))
- tokenizer = paddlenlp.transformers.ErnieTokenizer.from_pretrained(args.model_name)
- test_trans_func = partial(prepare_validation_features,
- max_seq_length=args.max_seq_length,
- doc_stride=args.doc_stride,
- tokenizer=tokenizer)
+ train_ds, dev_ds, test_ds = load_dataset(
+ 'dureader_robust', splits=('train', 'dev', 'test'))
+ tokenizer = paddlenlp.transformers.ErnieTokenizer.from_pretrained(
+ args.model_name)
+ test_trans_func = partial(
+ prepare_validation_features,
+ max_seq_length=args.max_seq_length,
+ doc_stride=args.doc_stride,
+ tokenizer=tokenizer)
test_ds.map(test_trans_func, batched=True, num_workers=4)
- test_batch_sampler = paddle.io.BatchSampler(test_ds, batch_size=args.batch_size, shuffle=False)
+ test_batch_sampler = paddle.io.BatchSampler(
+ test_ds, batch_size=args.batch_size, shuffle=False)
test_batchify_fn = lambda samples, fn=Dict({
"input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
@@ -40,8 +43,6 @@ def evaluate(args, is_test=True):
batch_sampler=test_batch_sampler,
collate_fn=test_batchify_fn,
return_list=True)
-
-
all_start_logits = []
all_end_logits = []
@@ -81,23 +82,44 @@ def evaluate(args, is_test=True):
for example in test_data_loader.dataset.data:
count += 1
print()
-        print('Question:',example['question'])
-        print('Context:',''.join(example['context']))
-        print('Answer:',all_predictions[example['id']])
+        print('Question:', example['question'])
+        print('Context:', ''.join(example['context']))
+        print('Answer:', all_predictions[example['id']])
if count >= 5:
break
model.train()
-if __name__=="__main__":
- parser = argparse.ArgumentParser(description="Reading Comprehension based on ERNIE.")
- parser.add_argument("--model_name", type=str, default="ernie-1.0", help="the model you want to load.")
- parser.add_argument("--max_seq_length", type=int, default=512, help="the max_seq_length of input sequence.")
- parser.add_argument("--doc_stride", type=int, default=128, help="doc_stride when processing data.")
- parser.add_argument("--batch_size", type=int, default=12, help="batch_size when model training.")
- parser.add_argument("--model_path", type=str, default="./ernie_rc.pdparams", help="the path of saving model.")
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Reading Comprehension based on ERNIE.")
+ parser.add_argument(
+ "--model_name",
+ type=str,
+ default="ernie-1.0",
+ help="the model you want to load.")
+ parser.add_argument(
+ "--max_seq_length",
+ type=int,
+ default=512,
+ help="the max_seq_length of input sequence.")
+ parser.add_argument(
+ "--doc_stride",
+ type=int,
+ default=128,
+        help="doc stride used when processing data.")
+ parser.add_argument(
+ "--batch_size",
+ type=int,
+ default=12,
+        help="batch size used during training.")
+ parser.add_argument(
+ "--model_path",
+ type=str,
+ default="./ernie_rc.pdparams",
+        help="path of the saved model to load.")
args = parser.parse_args()
-
+
evaluate(args)
diff --git a/transformer_courses/reading_comprehension_based_on_ernie/train.py b/transformer_courses/reading_comprehension_based_on_ernie/train.py
old mode 100644
new mode 100755
index 8de6e09c3..069d50613
--- a/transformer_courses/reading_comprehension_based_on_ernie/train.py
+++ b/transformer_courses/reading_comprehension_based_on_ernie/train.py
@@ -10,6 +10,7 @@
from utils import CrossEntropyLossForRobust
from data_processor import prepare_train_features, prepare_validation_features
+
def evaluate(model, data_loader, is_test=False):
model.eval()
@@ -51,41 +52,48 @@ def evaluate(model, data_loader, is_test=False):
for example in data_loader.dataset.data:
count += 1
print()
-        print('Question:',example['question'])
-        print('Context:',''.join(example['context']))
-        print('Answer:',all_predictions[example['id']])
+        print('Question:', example['question'])
+        print('Context:', ''.join(example['context']))
+        print('Answer:', all_predictions[example['id']])
if count >= 5:
break
model.train()
+
def train(args):
-
+
    # load the datasets
- train_ds, dev_ds, test_ds = load_dataset('dureader_robust', splits=('train', 'dev', 'test'))
+ train_ds, dev_ds, test_ds = load_dataset(
+ 'dureader_robust', splits=('train', 'dev', 'test'))
- tokenizer = paddlenlp.transformers.ErnieTokenizer.from_pretrained(args.model_name)
+ tokenizer = paddlenlp.transformers.ErnieTokenizer.from_pretrained(
+ args.model_name)
- train_trans_func = partial(prepare_train_features,
- max_seq_length=args.max_seq_length,
- doc_stride=args.doc_stride,
- tokenizer=tokenizer)
+ train_trans_func = partial(
+ prepare_train_features,
+ max_seq_length=args.max_seq_length,
+ doc_stride=args.doc_stride,
+ tokenizer=tokenizer)
train_ds.map(train_trans_func, batched=True, num_workers=4)
- dev_trans_func = partial(prepare_validation_features,
- max_seq_length=args.max_seq_length,
- doc_stride=args.doc_stride,
- tokenizer=tokenizer)
-
+ dev_trans_func = partial(
+ prepare_validation_features,
+ max_seq_length=args.max_seq_length,
+ doc_stride=args.doc_stride,
+ tokenizer=tokenizer)
+
dev_ds.map(dev_trans_func, batched=True, num_workers=4)
test_ds.map(dev_trans_func, batched=True, num_workers=4)
-
    # define the BatchSampler
- train_batch_sampler = paddle.io.DistributedBatchSampler(train_ds, batch_size=args.batch_size, shuffle=True)
- dev_batch_sampler = paddle.io.BatchSampler(dev_ds, batch_size=args.batch_size, shuffle=False)
- test_batch_sampler = paddle.io.BatchSampler(test_ds, batch_size=args.batch_size, shuffle=False)
+ train_batch_sampler = paddle.io.DistributedBatchSampler(
+ train_ds, batch_size=args.batch_size, shuffle=True)
+ dev_batch_sampler = paddle.io.BatchSampler(
+ dev_ds, batch_size=args.batch_size, shuffle=False)
+ test_batch_sampler = paddle.io.BatchSampler(
+ test_ds, batch_size=args.batch_size, shuffle=False)
    # define the batchify_fn
train_batchify_fn = lambda samples, fn=Dict({
@@ -119,16 +127,15 @@ def train(args):
collate_fn=dev_batchify_fn,
return_list=True)
-
-
    # training configuration
num_training_steps = len(train_data_loader) * args.epochs
use_gpu = True if paddle.get_device().startswith("gpu") else False
if use_gpu:
- paddle.set_device('gpu:0')
+ paddle.set_device('gpu:0')
+
+ lr_scheduler = paddlenlp.transformers.LinearDecayWithWarmup(
+ args.learning_rate, num_training_steps, args.warmup_proportion)
- lr_scheduler = paddlenlp.transformers.LinearDecayWithWarmup(args.learning_rate, num_training_steps, args.warmup_proportion)
-
model = ErnieForQuestionAnswering.from_pretrained(args.model_name)
decay_params = [
p.name for n, p in model.named_parameters()
@@ -140,7 +147,6 @@ def train(args):
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params)
-
    # training loop
model.train()
criterion = CrossEntropyLossForRobust()
@@ -152,8 +158,9 @@ def train(args):
logits = model(input_ids=input_ids, token_type_ids=segment_ids)
loss = criterion(logits, (start_positions, end_positions))
- if global_step % 100 == 0 :
- print("global step %d, epoch: %d, batch: %d, loss: %.5f" % (global_step, epoch, step, loss))
+ if global_step % 100 == 0:
+ print("global step %d, epoch: %d, batch: %d, loss: %.5f" %
+ (global_step, epoch, step, loss))
loss.backward()
optimizer.step()
@@ -163,22 +170,59 @@ def train(args):
paddle.save(model.state_dict(), args.save_model_path)
paddle.save(model.state_dict(), args.save_opt_path)
evaluate(model=model, data_loader=dev_data_loader)
-
-
-if __name__=="__main__":
- parser = argparse.ArgumentParser(description="Reading Comprehension based on ERNIE.")
- parser.add_argument("--model_name", type=str, default="ernie-1.0", help="the model you want to load.")
- parser.add_argument("--epochs", type=int, default=2, help="the epochs of model training.")
- parser.add_argument("--max_seq_length", type=int, default=512, help="the max_seq_length of input sequence.")
- parser.add_argument("--doc_stride", type=int, default=128, help="doc_stride when processing data.")
- parser.add_argument("--batch_size", type=int, default=12, help="batch_size when model training.")
- parser.add_argument("--learning_rate", type=float, default=3e-5, help="learning_rate for model training.")
- parser.add_argument("--warmup_proportion", type=float, default=0.1, help="the proportion of performing warmup in all training steps.")
- parser.add_argument("--weight_decay", type=float, default=0.01, help="the weight_decay of model parameters.")
- parser.add_argument("--save_model_path", type=str, default="./ernie_rc.pdparams", help="the path of saving model.")
- parser.add_argument("--save_opt_path", type=str, default="./ernie_rc.pdopt", help="the path of saving optimizer")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Reading Comprehension based on ERNIE.")
+ parser.add_argument(
+ "--model_name",
+ type=str,
+ default="ernie-1.0",
+ help="the model you want to load.")
+ parser.add_argument(
+        "--epochs", type=int, default=2, help="number of training epochs.")
+ parser.add_argument(
+ "--max_seq_length",
+ type=int,
+ default=512,
+ help="the max_seq_length of input sequence.")
+ parser.add_argument(
+ "--doc_stride",
+ type=int,
+ default=128,
+        help="doc stride used when processing data.")
+ parser.add_argument(
+ "--batch_size",
+ type=int,
+ default=12,
+        help="batch size used during training.")
+ parser.add_argument(
+ "--learning_rate",
+ type=float,
+ default=3e-5,
+ help="learning_rate for model training.")
+ parser.add_argument(
+ "--warmup_proportion",
+ type=float,
+ default=0.1,
+        help="proportion of training steps used for learning-rate warmup.")
+ parser.add_argument(
+ "--weight_decay",
+ type=float,
+ default=0.01,
+ help="the weight_decay of model parameters.")
+ parser.add_argument(
+ "--save_model_path",
+ type=str,
+ default="./ernie_rc.pdparams",
+        help="path for saving the model.")
+ parser.add_argument(
+ "--save_opt_path",
+ type=str,
+ default="./ernie_rc.pdopt",
+        help="path for saving the optimizer state.")
args = parser.parse_args()
-
train(args)
diff --git a/transformer_courses/reading_comprehension_based_on_ernie/utils.py b/transformer_courses/reading_comprehension_based_on_ernie/utils.py
old mode 100644
new mode 100755
index e618e9372..00b32083c
--- a/transformer_courses/reading_comprehension_based_on_ernie/utils.py
+++ b/transformer_courses/reading_comprehension_based_on_ernie/utils.py
@@ -1,11 +1,12 @@
import paddle
+
class CrossEntropyLossForRobust(paddle.nn.Layer):
def __init__(self):
super(CrossEntropyLossForRobust, self).__init__()
def forward(self, y, label):
- start_logits, end_logits = y # both shape are [batch_size, seq_len]
+        start_logits, end_logits = y  # both shapes are [batch_size, seq_len]
start_position, end_position = label
start_position = paddle.unsqueeze(start_position, axis=-1)
end_position = paddle.unsqueeze(end_position, axis=-1)
@@ -18,6 +19,3 @@ def forward(self, y, label):
loss = (start_loss + end_loss) / 2
return loss
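+
+
+# Minimal usage sketch (shapes here are illustrative assumptions, not taken
+# from the original file):
+#   criterion = CrossEntropyLossForRobust()
+#   start_logits = paddle.rand([4, 128])   # [batch_size, seq_len]
+#   end_logits = paddle.rand([4, 128])
+#   labels = (paddle.randint(0, 128, [4]), paddle.randint(0, 128, [4]))
+#   loss = criterion((start_logits, end_logits), labels)  # scalar tensor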
-
-
-
diff --git a/transformer_courses/sentiment_analysis_based_on_xlnet/README.md b/transformer_courses/sentiment_analysis_based_on_xlnet/README.md
old mode 100644
new mode 100755
diff --git a/transformer_courses/sentiment_analysis_based_on_xlnet/data_processor.py b/transformer_courses/sentiment_analysis_based_on_xlnet/data_processor.py
old mode 100644
new mode 100755
index f6710fda7..b97f4250d
--- a/transformer_courses/sentiment_analysis_based_on_xlnet/data_processor.py
+++ b/transformer_courses/sentiment_analysis_based_on_xlnet/data_processor.py
@@ -16,14 +16,13 @@
class IMDBDataset(Dataset):
def __init__(self, is_training=True):
self.data = self.load_imdb(is_training)
-
+
def __getitem__(self, idx):
return self.data[idx]
def __len__(self):
return len(self.data)
-
def load_imdb(self, is_training):
        # put the parsed samples into the list data_set
data_set = []
@@ -39,7 +38,10 @@ def load_imdb(self, is_training):
if bool(path_pattern.match(tf.name)):
sentence = tarf.extractfile(tf).read().decode()
sentence_label = 0 if label == 'neg' else 1
- data_set.append({"sentence":sentence, "label":sentence_label})
+ data_set.append({
+ "sentence": sentence,
+ "label": sentence_label
+ })
tf = tarf.next()
return data_set
@@ -75,4 +77,3 @@ def convert_example(example,
else:
return example['input_ids'], example['token_type_ids'], example[
'attention_mask']
-
diff --git a/transformer_courses/sentiment_analysis_based_on_xlnet/train.py b/transformer_courses/sentiment_analysis_based_on_xlnet/train.py
old mode 100644
new mode 100755
index e67ce552c..b7d4a37c6
--- a/transformer_courses/sentiment_analysis_based_on_xlnet/train.py
+++ b/transformer_courses/sentiment_analysis_based_on_xlnet/train.py
@@ -38,31 +38,30 @@ def evaluate(model, loss_fct, metric, data_loader):
print("eval loss: %f, acc: %s" % (np.average(losses), res))
model.train()
+
def train(args):
    # load the data
- trainset=IMDBDataset(is_training=True)
+ trainset = IMDBDataset(is_training=True)
testset = IMDBDataset(is_training=False)
    # wrap the datasets as MapDataset
- train_ds = MapDataset(trainset, label_list=[0,1])
- test_ds = MapDataset(testset, label_list=[0,1])
-
+ train_ds = MapDataset(trainset, label_list=[0, 1])
+ test_ds = MapDataset(testset, label_list=[0, 1])
+
    # define the XLNet tokenizer
tokenizer = XLNetTokenizer.from_pretrained(args.model_name_or_path)
trans_func = partial(
convert_example,
- tokenizer = tokenizer,
- label_list = train_ds.label_list,
- max_seq_length= args.max_seq_length
- )
+ tokenizer=tokenizer,
+ label_list=train_ds.label_list,
+ max_seq_length=args.max_seq_length)
    # build train_data_loader and dev_data_loader
train_ds = train_ds.map(trans_func, lazy=True)
train_batch_sampler = paddle.io.DistributedBatchSampler(
- train_ds, batch_size = args.batch_size, shuffle=True
- )
+ train_ds, batch_size=args.batch_size, shuffle=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id, pad_right=False), # input
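+        # pad_right=False left-pads the batch, matching XLNet's convention
+        # of placing padding before the real tokens.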
@@ -80,7 +79,8 @@ def train(args):
dev_ds = MapDataset(testset)
dev_ds = dev_ds.map(trans_func, lazy=True)
- dev_batch_sampler = paddle.io.BatchSampler(dev_ds, batch_size=args.batch_size, shuffle=False)
+ dev_batch_sampler = paddle.io.BatchSampler(
+ dev_ds, batch_size=args.batch_size, shuffle=False)
dev_data_loader = DataLoader(
dataset=dev_ds,
@@ -99,7 +99,8 @@ def train(args):
paddle.set_device('gpu:0')
num_classes = len(train_ds.label_list)
- model = XLNetForSequenceClassification.from_pretrained(args.model_name_or_path, num_classes=num_classes)
+ model = XLNetForSequenceClassification.from_pretrained(
+ args.model_name_or_path, num_classes=num_classes)
#paddle.set_device(args.device)
if paddle.distributed.get_world_size() > 1:
@@ -184,26 +185,85 @@ def train(args):
tic_train += time.time() - tic_eval
-if __name__=="__main__":
- parser = argparse.ArgumentParser(description="Reading Comprehension based on ERNIE.")
- parser.add_argument("--model_name_or_path", type=str, default="xlnet-base-cased", help="the model you want to load.")
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Reading Comprehension based on ERNIE.")
+ parser.add_argument(
+ "--model_name_or_path",
+ type=str,
+ default="xlnet-base-cased",
+ help="the model you want to load.")
parser.add_argument("--task_name", type=str, default="sst-2")
- parser.add_argument("--num_train_epochs", type=int, default=2, help="the epochs of model training.")
- parser.add_argument("--max_seq_length", type=int, default=128, help="the max_seq_length of input sequence.")
- parser.add_argument("--doc_stride", type=int, default=128, help="doc_stride when processing data.")
- parser.add_argument("--batch_size", type=int, default=32, help="batch_size when model training.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-8, help="adam epsilon setting.")
- parser.add_argument("--learning_rate", type=float, default=2e-5, help="learning_rate for model training.")
- parser.add_argument("--max_grad_norm", type=float, default=1.0, help="max_grad_norm applying adjusting gradient.")
- parser.add_argument("--max_steps", type=int, default=-1, help="the max steps you want to train.")
- parser.add_argument("--logging_steps", type=int, default=100, help="how many steps to log info.")
- parser.add_argument("--save_steps", type=int, default=500, help="how many steps to save model.")
+ parser.add_argument(
+ "--num_train_epochs",
+ type=int,
+ default=2,
+ help="the epochs of model training.")
+ parser.add_argument(
+ "--max_seq_length",
+ type=int,
+ default=128,
+ help="the max_seq_length of input sequence.")
+ parser.add_argument(
+ "--doc_stride",
+ type=int,
+ default=128,
+ help="doc_stride when processing data.")
+ parser.add_argument(
+ "--batch_size",
+ type=int,
+ default=32,
+ help="batch_size when model training.")
+ parser.add_argument(
+ "--adam_epsilon",
+ type=float,
+ default=1e-8,
+ help="adam epsilon setting.")
+ parser.add_argument(
+ "--learning_rate",
+ type=float,
+ default=2e-5,
+ help="learning_rate for model training.")
+ parser.add_argument(
+ "--max_grad_norm",
+ type=float,
+ default=1.0,
+ help="max_grad_norm applying adjusting gradient.")
+ parser.add_argument(
+ "--max_steps",
+ type=int,
+ default=-1,
+ help="the max steps you want to train.")
+ parser.add_argument(
+ "--logging_steps",
+ type=int,
+ default=100,
+ help="how many steps to log info.")
+ parser.add_argument(
+ "--save_steps",
+ type=int,
+ default=500,
+ help="how many steps to save model.")
parser.add_argument("--seed", type=int, default=43, help="random seed.")
- parser.add_argument("--device", type=str, default="gpu", help="cpu or gpu selection.")
- parser.add_argument("--warmup_steps", type=int, default=0, help="warmup steps.")
- parser.add_argument("--warmup_proportion", type=float, default=0.1, help="the proportion of performing warmup in all training steps.")
- parser.add_argument("--weight_decay", type=float, default=0.0, help="the weight_decay of model parameters.")
- parser.add_argument("--output_dir", type=str, default="./tmp", help="the path of saving model.")
+ parser.add_argument(
+ "--device", type=str, default="gpu", help="cpu or gpu selection.")
+ parser.add_argument(
+ "--warmup_steps", type=int, default=0, help="warmup steps.")
+ parser.add_argument(
+ "--warmup_proportion",
+ type=float,
+ default=0.1,
+ help="the proportion of performing warmup in all training steps.")
+ parser.add_argument(
+ "--weight_decay",
+ type=float,
+ default=0.0,
+ help="the weight_decay of model parameters.")
+ parser.add_argument(
+ "--output_dir",
+ type=str,
+ default="./tmp",
+ help="the path of saving model.")
args = parser.parse_args()
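
One detail worth spelling out from this script is the batchify_fn idiom: paddlenlp's Tuple applies one collation function per field of each sample, and Pad(pad_right=False) left-pads variable-length sequences to the batch maximum (XLNet conventionally pads on the left). A hedged sketch, assuming paddlenlp is installed; pad_val=0 stands in for tokenizer.pad_token_id:

```python
# Hedged sketch of the Tuple/Pad collation used in train.py; pad_val=0
# is a stand-in for tokenizer.pad_token_id.
from paddlenlp.data import Pad, Tuple

batchify_fn = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=0, pad_right=False),  # input_ids
    Pad(axis=0, pad_val=0, pad_right=False),  # token_type_ids
    Pad(axis=0, pad_val=0, pad_right=False),  # attention_mask
): fn(samples)

# Each sample is a tuple of variable-length fields; Tuple routes the
# i-th field of every sample to the i-th Pad, which left-pads to the
# longest sequence in the batch.
batch = batchify_fn([
    ([1, 2, 3], [0, 0, 0], [1, 1, 1]),
    ([4, 5], [0, 0], [1, 1]),
])
print([x.shape for x in batch])  # three (2, 3) arrays
```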
diff --git a/transformer_courses/sentiment_analysis_based_on_xlnet/utils.py b/transformer_courses/sentiment_analysis_based_on_xlnet/utils.py
old mode 100644
new mode 100755
index 6187453fa..ab4ec8431
--- a/transformer_courses/sentiment_analysis_based_on_xlnet/utils.py
+++ b/transformer_courses/sentiment_analysis_based_on_xlnet/utils.py
@@ -2,11 +2,13 @@
import random
import numpy as np
+
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
paddle.seed(args.seed)
+
class Config():
def __init__(self):
self.task_name = "sst-2"
@@ -21,8 +23,8 @@ def __init__(self):
self.num_train_epochs = 3
self.max_steps = -1
self.logging_steps = 100
- self.save_steps=500
- self.seed=43
- self.device="gpu"
+ self.save_steps = 500
+ self.seed = 43
+ self.device = "gpu"
self.warmup_steps = 0
self.warmup_proportion = 0.1
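
For completeness, set_seed above seeds all three RNG sources (Python's random, NumPy, and Paddle) so training runs are reproducible. A minimal usage sketch; argparse.Namespace stands in for the parsed argument object:

```python
# Hedged usage sketch for set_seed; Namespace(seed=43) is a stand-in
# for the args object produced by argparse in train.py.
import argparse
import random

import numpy as np
import paddle

def set_seed(args):
    random.seed(args.seed)
    np.random.seed(args.seed)
    paddle.seed(args.seed)

set_seed(argparse.Namespace(seed=43))
print(paddle.randn([2]))  # identical output on every run with seed 43
```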